diff --git a/quimb/tensor/block_gen.py b/quimb/tensor/block_gen.py new file mode 100644 index 00000000..44a32e0d --- /dev/null +++ b/quimb/tensor/block_gen.py @@ -0,0 +1,403 @@ +import numpy as np +from itertools import product + +from ..gen.rand import randn, seed_rand +from .block_interface import dispatch_settings, get_symmetry + +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.fermion import SparseFermionTensor +from pyblock3.algebra.symmetry import BondInfo + +def backend_wrapper(func): + def new_func(*args, **kwargs): + T = func(*args, **kwargs) + use_cpp = dispatch_settings("use_cpp") + if use_cpp: + T = T.to_flat() + return T + return new_func + +def _dispatch_dq(dq, symmetry): + '''Construct pyblock3 fermion symmetry object + + Parameters + ---------- + dq : int or tuple of integers + Quantum particle number(s) + symmetry : fermion symmetry class + + Returns + ------- + Fermion symmetry object + ''' + if dq is None: + dq = (0, ) + elif isinstance(dq, (int, np.integer, np.float)): + dq = (int(dq), ) + dq = symmetry(*dq) + return dq + +@backend_wrapper +def rand_single_block(shape, dtype=float, seed=None, + pattern=None, dq=None, ind=None): + '''Construct random block tensor with one block + + Parameters + ---------- + shape : tuple or list of integers + shape for the single block + dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional + The underlying data type. + seed : int, optional + A random seed. + pattern : string consisting of ("+", "-"), optional + The symmetry pattern for each dimension + dq : int or tuple of integers, optional + The net particle number(s) in this tensor, default is 0 + ind: int, optional + The axis to dispatch the dq symmetry + + Returns + ------- + Block tensor + ''' + if seed is not None: + np.random.seed(seed) + symmetry = get_symmetry() + dq = _dispatch_dq(dq, symmetry) + if pattern is None: + pattern = "-" * (len(shape)-1) + "+" + if ind is None: + try: + ind = pattern.index("+") + except: + ind = 0 + if pattern[ind] == "-": + dq = - dq + q_labels = [dq if ix==ind else symmetry(0) for ix in range(len(shape))] + array = randn(shape, dtype=dtype) + blk = SubTensor(reduced=array, q_labels=q_labels) + T = SparseFermionTensor(blocks=[blk, ], pattern=pattern) + return T + +@backend_wrapper +def ones_single_block(shape, pattern=None, dq=None, ind=None): + '''Construct block tensor filled with ones with a single block + + Parameters + ---------- + shape : tuple or list of integers + shape for the single block + pattern : string consisting of ("+", "-"), optional + The symmetry pattern for each dimension + dq : int or tuple of integers, optional + The net particle number(s) in this tensor, default is 0 + ind: int, optional + The axis to dispatch the dq symmetry + + Returns + ------- + Block tensor + ''' + symmetry = get_symmetry() + dq = _dispatch_dq(dq, symmetry) + if pattern is None: + pattern = "-" * (len(shape)-1) + "+" + if ind is None: + try: + ind = pattern.index("+") + except: + ind = 0 + if pattern[ind] == "-": + dq = - dq + q_labels = [dq if ix==ind else symmetry(0) for ix in range(len(shape))] + array = np.ones(shape) + blk = SubTensor(reduced=array, q_labels=q_labels) + T = SparseFermionTensor(blocks=[blk, ], pattern=pattern) + return T + +@backend_wrapper +def rand_all_blocks(shape, symmetry_info, dtype=float, + seed=None, pattern=None, dq=None): + '''Construct block tensor with specified blocks + + Parameters + ---------- + shape : tuple or list of integers + shape for all blocks + symmetry_info: tuple / list of 
tuple/list of integers + allowed quantum numbers for each dimension, eg, [(0,1),(0,1),(0,1,2)] + means allowed quantum numbers for the three dimensions are + (0,1), (0,1) and (0,1,2) respectively. For U1 \otimes U1 symmetry, + this could be [((0,0), (1,1), (1,-1)), ((0,0), (1,1), (1,-1)), + (0,0),(1,1),(1,-1),(2,0)] where each tuple denotes a particle + number and SZ number pair + dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional + The underlying data type. + seed : int, optional + A random seed. + pattern : string consisting of ("+", "-"), optional + The symmetry pattern for each dimension + dq : int or tuple of integers, optional + The net particle number(s) in this tensor, default is 0 + + Returns + ------- + Block tensor + ''' + if seed is not None: + np.random.seed(seed) + symmetry = get_symmetry() + dq = _dispatch_dq(dq, symmetry) + bond_infos = [] + for sh, ibonds in zip(shape, symmetry_info): + bonds = [] + for ibond in ibonds: + if isinstance(ibond, (int, np.integer)): + bonds.append(symmetry(ibond)) + else: + bonds.append(symmetry(*ibond)) + bonds = dict(zip(bonds, [sh,]*len(bonds))) + bond_infos.append(BondInfo(bonds)) + T = SparseFermionTensor.random(bond_infos, pattern=pattern, dq=dq, dtype=dtype) + return T + +def gen_2d_bonds(*args): + symmetry = dispatch_settings("symmetry") + func = {"U1": gen_2d_bonds_u1, + "Z2": gen_2d_bonds_z2, + "Z22": gen_2d_bonds_z22, + "U11": gen_2d_bonds_u11}[symmetry] + return func(*args) + +def gen_2d_bonds_z2(pnarray, physical_infos): + r'''Construct Z2 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + pnarray : array_like + Net Z2 symmetry for each site + physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + number of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers in each dimension ordered by up, right, down, left and physical. + dq_infos: dict[tuple[int], int] + A dictionary mapping the site coordinates to the net Z2 symmetry + on that site + ''' + Lx, Ly = pnarray.shape + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + nvir = (ix != Lx - 1) + (ix != 0) +\ + (iy != Ly - 1) + (iy != 0) + symmetry_infos[ix,iy] = [(0,1)] * nvir + [tuple(physical_infos[ix,iy])] + dq_infos[ix,iy]= pnarray[ix,iy] + return symmetry_infos, dq_infos + +def gen_2d_bonds_z22(n1array, n2array, physical_infos): + r'''Construct Z2 \otimes Z2 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + n1array : array_like + First entry of the net Z2 symmetry pairs for each site + n2array : array_like + Second entry of the net Z2 symmetry pairs for each site + physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + number pairs of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple] + A dictionary mapping the site coordinates to the allowed quantum particle + number pairs in each dimension ordered by up, right, down, left and physical. 
+ dq_infos: dict[tuple[int], tuple of integers] + A dictionary mapping the site coordinates to the net quantum particle number + pair on that site + ''' + Lx, Ly = n1array.shape + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + nvir = (ix != Lx - 1) + (ix != 0) +\ + (iy != Ly - 1) + (iy != 0) + symmetry_infos[ix,iy] = [((0,0),(0,1),(1,0),(1,1))] * nvir + [tuple(physical_infos[ix,iy])] + dq_infos[ix,iy]= (n1array[ix,iy], n2array[ix,iy]) + return symmetry_infos, dq_infos + +def gen_2d_bonds_u1(pnarray, physical_infos): + r'''Construct U1 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + pnarray : array_like + The net particle number inflow for each site + physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + number of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers in each dimension ordered by up, right, down, left and physical. + dq_infos: dict[tuple[int], int] + A dictionary mapping the site coordinates to the net quantum particle number + on that site + ''' + Lx, Ly = pnarray.shape + s_type = (Lx % 2==0) + vbonds = [[0 for _ in range(Ly)] for _ in range(Lx+1)] + hbonds = [[0 for _ in range(Ly+1)] for _ in range(Lx)] + def _get_bond(ix, iy, *directions): + bond_dict = {"r": hbonds[ix][iy+1], + "l": hbonds[ix][iy], + "u": vbonds[ix+1][iy], + "d": vbonds[ix][iy]} + return [bond_dict[ix] for ix in directions] + + ave = np.sum(pnarray)/pnarray.size + for ix in range(Lx): + sweep_left = (s_type and ix%2==0) or (not s_type and ix%2==1) + if sweep_left: + for iy in range(Ly-1,-1,-1): + if iy ==0: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + right, down, up = _get_bond(ix, iy, "r", "d", "u") + hbonds[ix][iy] = pnarray[ix][iy] + up + right - down - ave + else: + for iy in range(Ly): + if iy ==Ly-1: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + left, up, down = _get_bond(ix, iy, "l", "u", "d") + hbonds[ix][iy+1] = down + left + ave - up - pnarray[ix][iy] + + hbonds = np.asarray(hbonds)[:,1:-1] + vbonds = np.asarray(vbonds)[1:-1] + + def _round_to_bond(bd): + if bd.is_integer(): + ibond = np.rint(bd).astype(int) + return [ibond-1, ibond, ibond+1] + else: + ibond = np.floor(bd).astype(int) + return [ibond, ibond+1] + + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + block = [] + if ix != Lx - 1: # bond up + block.append(_round_to_bond(vbonds[ix,iy])) + if iy != Ly - 1: # bond right + block.append(_round_to_bond(hbonds[ix,iy])) + if ix != 0: # bond down + block.append(_round_to_bond(vbonds[ix-1,iy])) + if iy != 0: # bond left + block.append(_round_to_bond(hbonds[ix,iy-1])) + block.append(physical_infos[ix][iy]) + symmetry_infos[ix,iy] = block + dq_infos[ix,iy]=pnarray[ix,iy] + return symmetry_infos, dq_infos + +def gen_2d_bonds_u11(pnarray, szarray, physical_infos): + r'''Construct U1 \otime U1 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + pnarray : array_like + The net particle number inflow for each site + szarray : array_like + The net SZ number inflow for each site, the parity for each site must be + consistent with pnarray + 
physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers (particle number and SZ number pair) of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple] + A dictionary mapping the site coordinates to the allowed particle number + and SZ number pairs in each dimension ordered by up, right, down, left and physical. + dq_infos: dict[tuple[int], tuple of integers] + A dictionary mapping the site coordinates to the net quantum particle number + and SZ number pair on that site + ''' + + Lx, Ly = pnarray.shape + if not np.allclose(pnarray % 2, szarray % 2): + raise ValueError("parity inconsistent") + if abs(szarray).max()>1: + raise ValueError("net |SZ| >1 not supported yet") + s_type = (Lx % 2==0) + vbonds = [[0 for _ in range(Ly)] for _ in range(Lx+1)] + hbonds = [[0 for _ in range(Ly+1)] for _ in range(Lx)] + def _get_bond(ix, iy, *directions): + bond_dict = {"r": hbonds[ix][iy+1], + "l": hbonds[ix][iy], + "u": vbonds[ix+1][iy], + "d": vbonds[ix][iy]} + return [bond_dict[ix] for ix in directions] + + ave = np.sum(pnarray)/pnarray.size + for ix in range(Lx): + sweep_left = (s_type and ix%2==0) or (not s_type and ix%2==1) + if sweep_left: + for iy in range(Ly-1,-1,-1): + if iy ==0: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + right, down, up = _get_bond(ix, iy, "r", "d", "u") + hbonds[ix][iy] = pnarray[ix][iy] + up + right - down - ave + else: + for iy in range(Ly): + if iy ==Ly-1: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + left, up, down = _get_bond(ix, iy, "l", "u", "d") + hbonds[ix][iy+1] = down + left + ave - up - pnarray[ix][iy] + hbonds = np.asarray(hbonds)[:,1:-1] + vbonds = np.asarray(vbonds)[1:-1] + + def _round_to_bond(bd): + if bd.is_integer(): + ibond = np.rint(bd).astype(int) + if ibond % 2==0: + return [(ibond-1,1),(ibond-1,-1), (ibond,0), (ibond+1,-1), (ibond+1,1)] + else: + return [(ibond-1, 0), (ibond, 1), (ibond, -1), (ibond+1, 0)] + else: + ibond = np.floor(bd).astype(int) + if ibond % 2==0: + return [(ibond,0), (ibond+1,-1), (ibond+1,1)] + else: + return [(ibond, 1), (ibond, -1), (ibond+1, 0)] + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + block = [] + if ix != Lx - 1: # bond up + block.append(_round_to_bond(vbonds[ix,iy])) + if iy != Ly - 1: # bond right + block.append(_round_to_bond(hbonds[ix,iy])) + if ix != 0: # bond down + block.append(_round_to_bond(vbonds[ix-1,iy])) + if iy != 0: # bond left + block.append(_round_to_bond(hbonds[ix,iy-1])) + block.append(physical_infos[ix][iy]) + symmetry_infos[ix,iy] = block + dq_infos[ix,iy]= (pnarray[ix,iy],szarray[ix,iy]) + return symmetry_infos, dq_infos diff --git a/quimb/tensor/block_interface.py b/quimb/tensor/block_interface.py new file mode 100644 index 00000000..119accf9 --- /dev/null +++ b/quimb/tensor/block_interface.py @@ -0,0 +1,58 @@ +import sys +from pyblock3.algebra.fermion_symmetry import U11, U1, Z2, Z4, Z22 +from pyblock3.algebra.symmetry import BondInfo +from pyblock3.algebra.fermion import eye, SparseFermionTensor +from pyblock3.algebra import fermion_setting as setting +from pyblock3.algebra import fermion_ops + +this = sys.modules[__name__] +this.DEFAULT_SYMMETRY = "U1" +this.USE_CPP = True +this.USE_FERMION = True +symmetry_map = setting.symmetry_map + +def 
set_symmetry(symmetry): + symmetry = symmetry.upper() + if symmetry not in symmetry_map: + raise KeyError("input symmetry %s not supported"%symmetry) + this.DEFAULT_SYMMETRY = symmetry + setting.set_symmetry(symmetry) + +def set_backend(use_cpp): + this.USE_CPP = use_cpp + setting.set_flat(use_cpp) + +def set_fermion(use_fermion): + this.USE_FERMION = use_fermion + setting.set_fermion(use_fermion) + +def set_options(**kwargs): + symmetry = kwargs.pop("symmetry", this.DEFAULT_SYMMETRY) + use_fermion = kwargs.pop("fermion", this.USE_FERMION) + use_cpp = kwargs.pop("use_cpp", this.USE_CPP) + set_symmetry(symmetry) + set_fermion(use_fermion) + set_backend(use_cpp) + +def dispatch_settings(*keys): + dict = {"symmetry": "DEFAULT_SYMMETRY", + "fermion": "USE_FERMION", + "use_cpp": "USE_CPP"} + _settings = [] + for ikey in keys: + if ikey not in dict: + raise KeyError("%s not a valid backend setting"%ikey) + _settings.append(getattr(this, dict[ikey])) + if len(_settings) == 1: + _settings = _settings[0] + return _settings + +def get_symmetry(): + return setting.symmetry_map[this.DEFAULT_SYMMETRY] + +to_exponential = fermion_ops.get_exponential +H1 = fermion_ops.H1 +Hubbard = fermion_ops.Hubbard +onsite_U = fermion_ops.onsite_U +measure_SZ = fermion_ops.measure_SZ +ParticleNumber = fermion_ops.ParticleNumber diff --git a/quimb/tensor/block_tools.py b/quimb/tensor/block_tools.py new file mode 100644 index 00000000..53962043 --- /dev/null +++ b/quimb/tensor/block_tools.py @@ -0,0 +1,63 @@ +from quimb.tensor import block_interface as bitf +import numpy as np + +def apply(T, func): + use_cpp = bitf.dispatch_settings("use_cpp") + if use_cpp: + new_T = T.copy() + new_T.data = func(new_T.data) + else: + new_T = T.copy() + for iblk in new_T: + iblk[:] = func(iblk[:]) + return new_T + +def sqrt(T): + _sqrt = lambda x : x**.5 + return apply(T, _sqrt) + +def inv_with_smudge(T, cutoff=1e-10, gauge_smudge=1e-6): + def _inv_with_smudge(arr): + new_arr = np.zeros(arr.shape, dtype=arr.dtype) + ind = abs(arr) > cutoff + new_arr[ind] = (arr[ind] + gauge_smudge) ** -1 + return new_arr + return apply(T, _inv_with_smudge) + +def add_with_smudge(T, cutoff=1e-10, gauge_smudge=1e-6): + def _add_with_smudge(arr): + ind = abs(arr) > cutoff + arr[ind] += gauge_smudge + return arr + return apply(T, _add_with_smudge) + +def get_smudge_balance(T1, T2, ix, smudge): + flat = bitf.dispatch_settings("use_cpp") + if flat: + t1, t2 = T1.data.to_sparse(), T2.data.to_sparse() + else: + t1, t2 = T1.data, T2.data + sign1 = t1.pattern[T1.inds.index(ix)] + sign2 = t2.pattern[T2.inds.index(ix)] + s1_pattern = {"+":"-+", "-":"+-"}[sign1] + s2_pattern = {"-":"-+", "+":"+-"}[sign2] + + inv = (sign1 == sign2) + block_cls = t1.blocks[0].__class__ + block_dict = {} + for iblk1 in t1: + q0 = iblk1.q_labels[0] + block_dict[q0] = np.diag(np.asarray(iblk1)) + smudge + for iblk2 in t2: + q0 = -iblk2.q_labels[0] if inv else iblk2.q_labels[0] + if q0 not in block_dict: continue + block_dict[q0] = block_dict[q0] / (np.diag(np.asarray(iblk2)) + smudge) + + s1 = [block_cls(reduced=np.diag(s**-0.25), q_labels=(qlab,)*2) for qlab, s in block_dict.items()] + s2 = [block_cls(reduced=np.diag(s** 0.25), q_labels=(qlab,)*2) for qlab, s in block_dict.items()] + s1 = t1.__class__(blocks=s1, pattern=s1_pattern) + s2 = t2.__class__(blocks=s2, pattern=s2_pattern) + if flat: + s1 = s1.to_flat() + s2 = s2.to_flat() + return s1, s2 diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py new file mode 100644 index 00000000..e6d1e9a3 --- /dev/null +++ 
b/quimb/tensor/fermion.py @@ -0,0 +1,1099 @@ +"""Core tensor network tools. +""" +import os +import copy +import functools + +import numpy as np + +from ..utils import (check_opt, oset, valmap) +from .drawing import draw_tn + +from .tensor_core import Tensor, TensorNetwork, _parse_split_opts, oset_union, tags_to_oset, rand_uuid, _parse_split_opts +from .tensor_block import tensor_split as _tensor_split +from .tensor_block import _core_contract, tensor_canonize_bond, tensor_compress_bond, BlockTensor, BlockTensorNetwork, get_block_contraction_path_info +from .block_tools import apply, get_smudge_balance +from .block_interface import dispatch_settings +from functools import wraps + +def contract_decorator(fn): + @wraps(fn) + def wrapper(T1, T2, *args, **kwargs): + tid1, site1 = T1.get_fermion_info() + tid2, site2 = T2.get_fermion_info() + fs = T1.fermion_owner[0] + if site1 > site2: + fs.move(tid1, site2+1) + out = fn(T1, T2, *args, **kwargs) + else: + fs.move(tid2, site1+1) + out = fn(T2, T1, *args, **kwargs) + if not isinstance(out, (float, complex)): + fs.replace_tensor(min(site1, site2), out, virtual=True) + fs.remove_tensor(min(site1, site2)+1) + return out + return wrapper + +_core_contract = contract_decorator(_core_contract) + +def compress_decorator(fn): + @wraps(fn) + def wrapper(T1, T2, *args, **kwargs): + tid1, site1 = T1.get_fermion_info() + tid2, site2 = T2.get_fermion_info() + fs = T1.fermion_owner[0] + loc_dict = {tid1: site1, tid2: site2} + if site1 > site2: + fs.move(tid1, site2+1) + else: + fs.move(tid1, site2) + fn(T1, T2, *args, **kwargs) + fs._reorder_from_dict(loc_dict) + return T1, T2 + return wrapper + +tensor_compress_bond = compress_decorator(tensor_compress_bond) +tensor_canonize_bond = compress_decorator(tensor_canonize_bond) + +class FermionSpace: + def __init__(self, tensor_order=None, virtual=True): + self.tensor_order = {} + if tensor_order is not None: + for tid, (tsr, site) in tensor_order.items(): + self.add_tensor(tsr, tid, site, virtual=virtual) + + @property + def sites(self): + """ return a list of all the occupied positions + """ + if len(self.tensor_order) == 0: + return [] + else: + return [val[1] for val in self.tensor_order.values()] + + def copy(self): + """ Copy the FermionSpace object. Tensor ids and positions will be + preserved and tensors will be copied + """ + return FermionSpace(self.tensor_order, virtual=False) + + def add_tensor(self, tsr, tid=None, site=None, virtual=False): + """ Add a tensor to the current FermionSpace, eg + 01234 0123456 + XXXXX, (6, B) -> XXXXX-B + + Parameters + ---------- + tsr : FermionTensor + The fermionic tensor to operate on + tid : string, optional + tensor id + site: int or None, optional + The position to place the tensor. 
Tensor will be + appended to last position if not specified + virtual: bool, optional + whether to add the tensor inplace + + """ + if (tid is None) or (tid in self.tensor_order.keys()): + tid = rand_uuid(base="_T") + if site is None: + site = 0 if len(self.sites)==0 else max(self.sites) + 1 + elif site in self.sites: + raise ValueError("site:%s occupied, use replace/insert_tensor method"%site) + + T = tsr if virtual else tsr.copy() + self.tensor_order[tid] = (T, site) + T.set_fermion_owner(self, tid) + + def replace_tensor(self, site, tsr, tid=None, virtual=False): + """ Replace the tensor at a given site, eg + 0123456789 0123456789 + XXXXAXXXXX, (4, B) -> XXXXBXXXXX + + Parameters + ---------- + site: int + The position to replace the tensor + tsr : FermionTensor + The fermionic tensor to operate on + tid : string, optional + rename a new tensor id if provided + virtual: bool, optional + whether to replace the tensor inplace + """ + atid = self.get_tid_from_site(site) + atsr = self.tensor_order[atid][0] + T = tsr if virtual else tsr.copy() + if tid is None or (tid in self.tensor_order.keys() and tid != atid): + tid = atid + T.set_fermion_owner(self, tid) + atsr.remove_fermion_owner() + del self.tensor_order[atid] + self.tensor_order[tid] = (T, site) + + def insert_tensor(self, site, tsr, tid=None, virtual=False): + """ insert a tensor at a given site, all tensors afterwards + will be shifted forward by 1, eg, + 012345678 0123456789 + ABCDEFGHI, (4, X) -> ABCDXEFGHI + + Parameters + ---------- + site: int + The position to insert the tensor + tsr : FermionTensor + The fermionic tensor to operate on + tid : string, optional + rename a new tensor id if provided + virtual: bool, optional + whether to insert the tensor inplace + """ + if (tid is None) or (tid in self.tensor_order.keys()): + tid = rand_uuid(base="_T") + if site not in self.sites: + self.add_tensor(tsr, tid, site=site, virtual=virtual) + else: + T = tsr if virtual else tsr.copy() + T.set_fermion_owner(self, tid) + for atid, (atsr, asite) in self.tensor_order.items(): + if asite >= site: + self.tensor_order.update({atid: (atsr, asite+1)}) + self.tensor_order.update({tid: (T, site)}) + + def insert(self, site, *tsrs, virtual=False): + """ insert a group of tensors at a given site, all tensors afterwards + will be shifted forward accordingly, eg, + 0123456 0123456789 + ABCDEFG, (4, (X,Y,Z)) -> ABCDXYZEFG + + Parameters + ---------- + site: int + The position to begin inserting the tensor + tsrs : a tuple/list of FermionTensor + The fermionic tensors to operate on + virtual: bool, optional + whether to insert the tensors inplace + """ + for T in tsrs: + self.insert_tensor(site, T, virtual=virtual) + site += 1 + + def get_tid_from_site(self, site): + """ Return the tensor id at given site + + Parameters + ---------- + site: int + The position to obtain the tensor id + """ + if site not in self.sites: + raise KeyError("site:%s not occupied"%site) + idx = self.sites.index(site) + return list(self.tensor_order.keys())[idx] + + def get_full_info(self, tid_or_site): + if isinstance(tid_or_site, str): + tid = tid_or_site + else: + tid = self.get_tid_from_site(self, tid_or_site) + T, site = self.tensor_order[tid_or_site] + return T, tid, site + + def get_ind_map(self): + ind_map = dict() + for tid, (T, _) in self.tensor_order.items(): + for ind in T.inds: + if ind not in ind_map: + ind_map[ind] = oset([tid]) + else: + ind_map[ind] = ind_map[ind].union(oset([tid])) + return ind_map + + def _reorder_from_dict(self, tid_map): + """ inplace 
reordering of tensors from a tensor_id/position mapping. + Pizorn algorithm will be applied during moving + + Parameters + ---------- + tid_map: dictionary + Mapping of tensor id to the desired location + """ + if len(tid_map) == len(self.tensor_order): + self.reorder_all_(tid_map) + else: + tid_lst = list(tid_map.keys()) + des_sites = list(tid_map.values()) + # sort the destination sites to avoid cross-overs during moving + work_des_sites = sorted(des_sites)[::-1] + for isite in work_des_sites: + ind = des_sites.index(isite) + self.move(tid_lst[ind], isite) + + def reorder_all(self, tid_map, ind_map=None, inplace=False): + """ reordering of tensors from a tensor_id/position mapping. The tid_map + must contains the mapping for all tensors in this FermionSpace. + Pizorn algorithm will be applied during moving. + + Parameters + ---------- + tid_map: dictionary + Mapping of tensor id to the desired location + ind_map: dictinary, optional + Mapping of tensor index to the tensor id + inplace: bool, optional + Whether the orordering operation is inplace or not + """ + fs = self if inplace else self.copy() + # Compute Global Phase + if len(tid_map) != len(fs.tensor_order): + raise ValueError("tid_map must be of equal size as the FermionSpace") + nsites = len(fs.tensor_order) + parity_tab = [] + input_tab = [] + free_tids = [] + for isite in range(nsites): + tid = fs.get_tid_from_site(isite) + T = fs.tensor_order[tid][0] + if not T.avoid_phase: + free_tids.append(tid) + parity_tab.append(T.parity) + input_tab.append(tid) + + tid_lst = list(tid_map.keys()) + des_sites = list(tid_map.values()) + global_parity = 0 + for fsite in range(nsites-1, -1, -1): + idx = des_sites.index(fsite) + tid = tid_lst[idx] + isite = input_tab.index(tid) + if isite == fsite: continue + global_parity += np.sum(parity_tab[isite+1:fsite+1]) * parity_tab[isite] + parity_tab[isite:fsite+1] = parity_tab[isite+1:fsite+1]+[parity_tab[isite]] + input_tab[isite:fsite+1] = input_tab[isite+1:fsite+1]+[input_tab[isite]] + + _global_flip = (int(global_parity) % 2 == 1) + if _global_flip: + if len(free_tids) ==0: + raise ValueError("Global flip required on one tensor but all tensors are marked to avoid phase") + T = fs.tensor_order[free_tids[0]][0] + T.flip_(global_flip=_global_flip) + + # Compute Local Phase + if ind_map is None: + ind_map = fs.get_ind_map() + else: + ind_map = ind_map.copy() + + local_flip_info = dict() + for tid1, fsite1 in tid_map.items(): + T1, isite1 = fs.tensor_order[tid1] + for ind in T1.inds: + tids = ind_map.pop(ind, []) + if len(tids) <2: + continue + tid2, = tids - oset([tid1]) + T2, isite2 = fs.tensor_order[tid2] + fsite2 = tid_map[tid2] + if (isite1-isite2) * (fsite1-fsite2) < 0: + if T1.avoid_phase and T2.avoid_phase: + raise ValueError("relative order for %s and %s changed, local phase can not be avoided"%(tid1, tid2)) + else: + marked_tid = tid2 if T1.avoid_phase else tid1 + if marked_tid not in local_flip_info: + local_flip_info[marked_tid] = [ind] + else: + local_flip_info[marked_tid].append(ind) + + for tid, inds in local_flip_info.items(): + T = fs.tensor_order[tid][0] + T.flip_(local_inds=inds) + + for tid, fsite in tid_map.items(): + T = fs.tensor_order[tid][0] + fs.tensor_order.update({tid: (T, fsite)}) + return fs + + reorder_all_ = functools.partialmethod(reorder_all, inplace=True) + + def __setitem__(self, site, tsr): + if site in self.sites: + self.replace_tensor(site, tsr, virtual=True) + else: + self.add_tensor(site, tsr, virtual=True) + + def move(self, tid_or_site, des_site): + """ Move 
a tensor inside this FermionSpace to the specified position with Pizorn algorithm. + Both local and global phase will be factorized to this single tensor + + Parameters + ---------- + tid_or_site: string or int + id or position of the original tensor + des_site: int + the position to move the tensor to + """ + + tsr, tid, site = self.get_full_info(tid_or_site) + avoid_phase = tsr.avoid_phase + if site == des_site: return + move_left = (des_site < site) + iterator = range(des_site, site) if move_left else range(site+1, des_site+1) + shared_inds = [] + tid_lst = [self.get_tid_from_site(isite) for isite in iterator] + parity = 0 + for itid in tid_lst: + itsr, isite = self.tensor_order[itid] + i_shared_inds = list(oset(itsr.inds) & oset(tsr.inds)) + if avoid_phase: + global_flip = (tsr.parity * itsr.parity == 1) + if len(i_shared_inds)>0 or global_flip: + if itsr.avoid_phase: + raise ValueError("Two tensors marked to avoid phase") + itsr.flip_(global_flip=global_flip, local_inds=i_shared_inds) + else: + shared_inds += i_shared_inds + parity += itsr.parity + + if move_left: + self.tensor_order[itid] = (itsr, isite+1) + else: + self.tensor_order[itid] = (itsr, isite-1) + + if not avoid_phase: + global_parity = (parity % 2) * tsr.data.parity + global_flip = (global_parity == 1) + tsr.flip_(global_flip=global_flip, local_inds=shared_inds) + + self.tensor_order[tid] = (tsr, des_site) + + def move_past(self, tsr, site_range=None): + """ Move an external tensor past the specifed site ranges with Pizorn algorithm. + Both local and global phase will be factorized to the external tensor. + The external tensor will not be added to this FermionSpace + + Parameters + ---------- + tsr: FermionTensor + the external + site_range: a tuple of integers, optional + the range of the tensors to move past, if not specified, will be the whole space + """ + if site_range is None: + sites = self.sites + site_range = (min(sites), max(sites)+1) + start, end = site_range + shared_inds = [] + tid_lst = [self.get_tid_from_site(isite) for isite in range(start, end)] + parity = 0 + for itid in tid_lst: + itsr, isite = self.tensor_order[itid] + parity += itsr.parity + shared_inds += list(oset(itsr.inds) & oset(tsr.inds)) + global_parity = (parity % 2) * tsr.data.parity + if global_parity != 0: tsr.data._global_flip() + axes = [tsr.inds.index(i) for i in shared_inds] + if len(axes)>0: tsr.data._local_flip(axes) + return tsr + + def make_adjacent(self, tid1, tid2, direction='left'): + """ Move one tensor in the specified direction to make the two adjacent + """ + site1 = self.tensor_order[tid1][1] + site2 = self.tensor_order[tid2][1] + if abs(site1-site2)!=1: + site1 = self.tensor_order[tid1][1] + site2 = self.tensor_order[tid2][1] + if site1 == site2: return + sitemin, sitemax = min(site1, site2), max(site1, site2) + if direction == 'left': + self.move(sitemax, sitemin+1) + elif direction == 'right': + self.move(sitemin, sitemax-1) + else: + raise ValueError("direction %s not recognized"%direction) + + def remove_tensor(self, site): + """ remove a specified tensor at a given site, eg + 012345 01234 + ABCDEF, (3, True) -> ABCEF + """ + tid = self.get_tid_from_site(site) + tsr = self.tensor_order[tid][0] + tsr.remove_fermion_owner() + del self.tensor_order[tid] + + indent_sites = sorted([isite for isite in self.sites if isite>site]) + tid_lst = [self.get_tid_from_site(isite) for isite in indent_sites] + for tid in tid_lst: + tsr, site = self.tensor_order[tid] + self.tensor_order[tid] = (tsr, site-1) + + @property + def H(self): + 
""" Construct a FermionSpace for the bra state of the tensors + """ + max_site = max(self.sites) + new_fs = FermionSpace() + for tid, (tsr, site) in self.tensor_order.items(): + T = tsr.copy() + T.modify(data=T.data.dagger, inds=T.inds[::-1]) + new_fs.add_tensor(T, tid, max_site-site, virtual=True) + return new_fs + + def _contract_pairs(self, tid1, tid2, output_inds=None): + self.make_adjacent(tid1, tid2) + T1, site1 = self.tensor_order[tid1] + T2, site2 = self.tensor_order[tid2] + out = _contract_connected(T1, T2, output_inds) + self.replace_tensor(min(site1, site2), out, virtual=True) + self.remove_tensor(max(site1, site2)) + +# --------------------------------------------------------------------------- # +# Tensor Funcs # +# --------------------------------------------------------------------------- # + +def tensor_contract(*tensors, output_inds=None, inplace=False, **contract_opts): + if len(tensors) == 1: + if inplace: + return tensors[0] + else: + return tensors[0].copy() + path_info = get_block_contraction_path_info(*tensors, **contract_opts) + fs, tid_lst = _fetch_fermion_space(*tensors, inplace=inplace) + if inplace: + tensors = list(tensors) + else: + tensors = [fs.tensor_order[tid][0] for tid in tid_lst] + + for conc in path_info.contraction_list: + pos1, pos2 = sorted(conc[0]) + T2 = tensors.pop(pos2) + T1 = tensors.pop(pos1) + out = _core_contract(T1, T2) + tensors.append(out) + + if not isinstance(out, (float, complex)): + _output_inds = out.inds + if output_inds is None: + output_inds = _output_inds + else: + output_inds = tuple(output_inds) + if output_inds!=_output_inds: + out.transpose_(*output_inds) + return out + +def tensor_split( + T, + left_inds, + method='svd', + get=None, + absorb='both', + max_bond=None, + cutoff=1e-10, + cutoff_mode='rel', + renorm=None, + ltags=None, + rtags=None, + stags=None, + bond_ind=None, + right_inds=None, + qpn_info = None, +): + if get is not None: + return _tensor_split(T, left_inds, method=method, get=get, absorb=absorb, max_bond=max_bond, + cutoff=cutoff, cutoff_mode=cutoff_mode, renorm=renorm, ltags=ltags, rtags=rtags, + stags=stags, bond_ind=bond_ind, right_inds=right_inds, qpn_info=qpn_info) + else: + tensors = _tensor_split(T, left_inds, method=method, get="tensors", absorb=absorb, max_bond=max_bond, + cutoff=cutoff, cutoff_mode=cutoff_mode, renorm=renorm, ltags=ltags, rtags=rtags, + stags=stags, bond_ind=bond_ind, right_inds=right_inds, qpn_info=qpn_info) + return FermionTensorNetwork(tensors[::-1], check_collisions=False) + +def is_mergeable(*ts_or_tsn): + """Check if all FermionTensor or FermionTensorNetwork objects + are part of the same FermionSpace + """ + if len(ts_or_tsn)==1 and isinstance(ts_or_tsn, (FermionTensor, FermionTensorNetwork)): + return True + fs_lst = [] + site_lst = [] + for obj in ts_or_tsn: + if isinstance(obj, FermionTensor): + if obj.fermion_owner is None: + return False + fsobj, tid = obj.fermion_owner + fs_lst.append(hash(fsobj)) + site_lst.append(fsobj.tensor_order[tid][1]) + elif isinstance(obj, FermionTensorNetwork): + fs_lst.append(hash(obj.fermion_space)) + site_lst.extend(obj.filled_sites) + else: + raise TypeError("unable to find fermionspace") + + return all([fs==fs_lst[0] for fs in fs_lst]) and len(set(site_lst)) == len(site_lst) + +def _fetch_fermion_space(*tensors, inplace=True): + """ Retrieve the FermionSpace and the associated tensor_ids for the tensors. + If the given tensors all belong to the same FermionSpace object (fsobj), + the underlying fsobj will be returned. 
Otherwise, a new FermionSpace will be created, + and the tensors will be placed in the same order as the input tensors. + + Parameters + ---------- + tensors : a tuple or list of FermionTensors + input_tensors + inplace: bool + if not true, a new FermionSpace will be created with all tensors copied. + so subsequent operations on the fsobj will not alter the input tensors. + + Returns + ------- + fs : a FermionSpace object + tid_lst: a list of strings for the tensor_ids + """ + + if is_mergeable(*tensors): + if isinstance(tensors[0], FermionTensor): + fs = tensors[0].fermion_owner[0] + else: + fs = tensors[0].fermion_space + if not inplace: + fs = fs.copy() + tid_lst = [] + for tsr_or_tn in tensors: + if isinstance(tsr_or_tn, FermionTensor): + tid_lst.append(tsr_or_tn.get_fermion_info()[0]) + else: + tid_lst.append(tsr_or_tn.tensor_map.keys()) + else: + fs = FermionSpace() + for tsr_or_tn in tensors[::-1]: + if isinstance(tsr_or_tn, FermionTensor): + fs.add_tensor(tsr_or_tn, virtual=inplace) + elif isinstance(tsr_or_tn, FermionTensorNetwork): + if not tsr_or_tn.is_continuous(): + raise ValueError("Input Network not continous, merge not allowed") + for itsr in tsr_or_tn: + fs.add_tensor(itsr, virtual=inplace) + tid_lst = list(fs.tensor_order.keys()) + return fs, tid_lst + +# --------------------------------------------------------------------------- # +# Tensor Class # +# --------------------------------------------------------------------------- # + +class FermionTensor(BlockTensor): + + __slots__ = ('_data', '_inds', '_tags', '_left_inds', + '_owners', '_fermion_owner', '_avoid_phase', + '_fermion_path') + + def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): + + # a new or copied Tensor always has no owners + self._fermion_owner = None + BlockTensor.__init__(self, data=data, inds=inds, tags=tags, left_inds=left_inds) + if isinstance(data, FermionTensor): + self._data = data.data.copy() + self._avoid_phase = data._avoid_phase + self._fermion_path = data._fermion_path.copy() + else: + self._avoid_phase = False + self._fermion_path = dict() + + @property + def symmetry(self): + return self.data.dq.__name__ + + @property + def net_symmetry(self): + return self.data.dq + + @property + def avoid_phase(self): + return self._avoid_phase + + @property + def fermion_path(self): + return self._fermion_path + + @avoid_phase.setter + def avoid_phase(self, avoid_phase): + self._avoid_phase = avoid_phase + + @fermion_path.setter + def fermion_path(self, fermion_path): + self._fermion_path = fermion_path + + def set_fermion_path(self, global_flip=False, local_inds=None): + if local_inds is None: + local_inds = [] + _global_flip = self.fermion_path.pop("global_flip", False) + _local_inds = self.fermion_path.pop("local_inds", []) + self._fermion_path["global_flip"] = _global_flip ^ global_flip + all_inds = tuple(local_inds) + tuple(_local_inds) + updated_local_inds = [] + for ind in all_inds: + count = all_inds.count(ind) + if count %2 ==1: + updated_local_inds.append(ind) + self._fermion_path["local_inds"] = updated_local_inds + + @property + def fermion_owner(self): + return self._fermion_owner + + @property + def parity(self): + return self.data.parity + + def modify(self, **kwargs): + if "inds" in kwargs and "data" not in kwargs: + inds = kwargs.get("inds") + local_inds = self.fermion_path.pop("local_inds", []) + new_local_inds = [] + for ind in local_inds: + if ind in self.inds: + new_ind = inds[self.inds.index(ind)] + new_local_inds.append(new_ind) + self._fermion_path["local_inds"] = 
new_local_inds + + super().modify(**kwargs) + + def flip(self, global_flip=False, local_inds=None, inplace=False): + T = self if inplace else self.copy() + T.set_fermion_path(global_flip=global_flip, local_inds=local_inds) + if global_flip: + T.data._global_flip() + if local_inds is not None and len(local_inds)>0: + axes = [T.inds.index(ind) for ind in local_inds] + T.data._local_flip(axes) + return T + + flip_ = functools.partialmethod(flip, inplace=True) + + def copy(self, deep=False): + """Copy this tensor. Note by default (``deep=False``), the underlying + array will *not* be copied. The fermion owner will to reset to None + """ + if deep: + t = copy.deepcopy(self) + t.avoid_phase = self.avoid_phase + t.fermion_path = self.fermion_path.copy() + t.remove_fermion_owner() + else: + t = self.__class__(self, None) + return t + + def get_fermion_info(self): + if self.fermion_owner is None: + return None + fs, tid = self.fermion_owner + site = fs.tensor_order[tid][1] + return (tid, site) + + def contract(self, *others, output_inds=None, **opts): + return tensor_contract(self, *others, output_inds=output_inds, **opts) + + @fermion_owner.setter + def fermion_owner(self, fowner): + self._fermion_owner = fowner + + def set_fermion_owner(self, fs, tid): + self.fermion_owner = (fs, tid) + + def remove_fermion_owner(self): + self.fermion_owner = None + + def split(self, *args, **kwargs): + return tensor_split(self, *args, **kwargs) + + def __and__(self, other): + """Combine with another ``Tensor`` or ``TensorNetwork`` into a new + ``TensorNetwork``. + """ + return FermionTensorNetwork((self, other)) + + def __or__(self, other): + """Combine virtually (no copies made) with another ``Tensor`` or + ``TensorNetwork`` into a new ``TensorNetwork``. + """ + return FermionTensorNetwork((self, other), virtual=True) + + def draw(self, *args, **kwargs): + """Plot a graph of this tensor and its indices. 
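+
+        A minimal usage sketch (``t`` is assumed to be a ``FermionTensor``)::
+
+            t.draw()   # wraps ``t`` in a FermionTensorNetwork and plots it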
+ """ + draw_tn(FermionTensorNetwork((self,)), *args, **kwargs) + + graph = draw + +# --------------------------------------------------------------------------- # +# Tensor Network Class # +# --------------------------------------------------------------------------- # + +class FermionTensorNetwork(BlockTensorNetwork): + + __slots__ = ('_inner_inds', '_outer_inds', '_tid_counter') + _EXTRA_PROPS = () + _CONTRACT_STRUCTURED = False + + def __init__(self, ts, *, virtual=False, check_collisions=True): + + # short-circuit for copying TensorNetworks + if isinstance(ts, self.__class__): + if not ts.is_continuous(): + raise TypeError("Tensors not continuously placed in the network, \ + this maybe due to this network being part of another network") + fs = FermionSpace() + self.tag_map = valmap(lambda tids: tids.copy(), ts.tag_map) + self.ind_map = valmap(lambda tids: tids.copy(), ts.ind_map) + self.tensor_map = dict() + for t in ts: + tid = t.get_fermion_info()[0] + t = t.copy() + self.tensor_map[tid] = t + self.tensor_map[tid].add_owner(self, tid) + fs.add_tensor(t, tid=tid, virtual=True) + self._inner_inds = ts._inner_inds.copy() + self._outer_inds = ts._outer_inds.copy() + self._tid_counter = ts._tid_counter + self.exponent = ts.exponent + for ep in ts.__class__._EXTRA_PROPS: + setattr(self, ep, getattr(ts, ep)) + return + else: + BlockTensorNetwork.__init__(self, ts, virtual=virtual, check_collisions=True) + + @property + def fermion_space(self): + if len(self.tensor_map)==0: + return FermionSpace() + else: + return list(self.tensor_map.values())[0].fermion_owner[0] + + @property + def filled_sites(self): + return [self.fermion_space.tensor_order[tid][1] for tid in self.tensor_map.keys()] + + @property + def H(self): + tn = self.copy(full=True) + fs = tn.fermion_space + max_site = max(fs.sites) + for tid, (T, site) in fs.tensor_order.items(): + T.modify(data=T.data.dagger, inds=T.inds[::-1]) + fs.tensor_order.update({tid: (T, max_site-site)}) + return tn + + def is_continuous(self): + """ + Check if sites in the current tensor network are contiguously occupied + """ + filled_sites = self.filled_sites + if len(filled_sites) ==0 : return True + return (max(filled_sites) - min(filled_sites) + 1) == len(filled_sites) + + def copy(self, full=False): + """ For full copy, the tensors and underlying FermionSpace(all tensors in it) will + be copied. For partial copy, the tensors in this network must be continuously + placed and a new FermionSpace will be created to hold this continous sector. + """ + if full: + fs = self.fermion_space.copy() + tids = list(self.tensor_map.keys()) + tsr = [fs.tensor_order[tid][0] for tid in tids] + newtn = FermionTensorNetwork(tsr, virtual=True) + else: + if not self.is_continuous(): + raise TypeError("Tensors not continuously placed in the network, \ + partial copy not allowed") + newtn = FermionTensorNetwork(self) + newtn.view_like_(self) + return newtn + + def __and__(self, other): + """Combine this tensor network with more tensors, without contracting. + Copies the tensors. + """ + if is_mergeable(self, other): + raise ValueError("the two networks are in the same fermionspace, use self |= other") + return FermionTensorNetwork((self, other), virtual=False) + + def __or__(self, other): + """Combine this tensor network with more tensors, without contracting. + Views the constituent tensors. 
+ """ + return FermionTensorNetwork((self, other), virtual=True) + + def __iter__(self): + sorted_sites = sorted(self.filled_sites) + for isite in sorted_sites: + tid = self.fermion_space.get_tid_from_site(isite) + yield self.tensor_map[tid] + + def __setitem__(self, tags, tensor): + """Set the single tensor uniquely associated with ``tags``. + """ + tids = self._get_tids_from_tags(tags, which='all') + if len(tids) != 1: + raise KeyError("'TensorNetwork.__setitem__' is meant for a single " + "existing tensor only - found {} with tag(s) '{}'." + .format(len(tids), tags)) + + if not isinstance(tensor, FermionTensor): + raise TypeError("Can only set value with a new 'FermionTensor'.") + + tid, = tids + site = self.fermion_space.tensor_order[tid][1] + TensorNetwork._pop_tensor(self, tid) + TensorNetwork.add_tensor(self, tensor, tid=tid, virtual=True) + self.fermion_space.replace_tensor(site, tensor, tid=tid, virtual=True) + + def _reorder_from_tid(self, tid_map, inplace=False): + tn = self if inplace else self.copy(full=True) + tn.fermion_space._reorder_from_dict(tid_map) + return tn + + def add_tensor(self, tsr, tid=None, virtual=False, site=None): + if tid is None or tid in self.fermion_space.tensor_order.keys(): + tid = rand_uuid(base="_T") + + if virtual: + fs = tsr.fermion_owner + if fs is not None: + if hash(fs[0]) != hash(self.fermion_space) and len(self.tensor_map)!=0: + raise ValueError("the tensor is already in a different FermionSpace, inplace addition not allowed") + else: + tid, isite = tsr.get_fermion_info() + if site is not None and site != isite: + raise ValueError("the specified site not consistent with the original location of this tensor in the FermionSpace") + TensorNetwork.add_tensor(self, tsr, tid, virtual=True) + else: + self.fermion_space.add_tensor(tsr, tid, site, virtual=True) + TensorNetwork.add_tensor(self, tsr, tid, virtual=True) + else: + T = tsr.copy() + self.fermion_space.add_tensor(T, tid, site, virtual=True) + TensorNetwork.add_tensor(self, T, tid, virtual=True) + + def add_tensor_network(self, tn, virtual=False, check_collisions=True): + if virtual: + if min(len(self.tensor_map), len(tn.tensor_map)) == 0: + TensorNetwork.add_tensor_network(self, tn, virtual=virtual, check_collisions=check_collisions) + return + elif hash(tn.fermion_space) == hash(self.fermion_space): + if is_mergeable(self, tn): + TensorNetwork.add_tensor_network(self, tn, virtual=virtual, check_collisions=check_collisions) + else: + raise ValueError("the two tensornetworks co-share same sites, inplace addition not allow") + return + + if not tn.is_continuous(): + raise ValueError("input tensor network is not contiguously ordered") + + sorted_tensors = [] + for tsr in tn: + tid = tsr.get_fermion_info()[0] + sorted_tensors.append([tid, tsr]) + # if inplace, fermion_owners need to be removed first to avoid conflicts + if virtual: + for tid, tsr in sorted_tensors: + tsr.remove_fermion_owner() + + if check_collisions: # add tensors individually + if getattr(self, '_inner_inds', None) is None: + self._inner_inds = oset(self.inner_inds()) + + # check for matching inner_indices -> need to re-index + other_inner_ix = oset(tn.inner_inds()) + clash_ix = self._inner_inds & other_inner_ix + + if clash_ix: + can_keep_ix = other_inner_ix - self._inner_inds + new_inds = oset(rand_uuid() for _ in range(len(clash_ix))) + reind = dict(zip(clash_ix, new_inds)) + self._inner_inds.update(new_inds, can_keep_ix) + else: + self._inner_inds.update(other_inner_ix) + + # add tensors, reindexing if necessary + for tid, 
tsr in sorted_tensors: + if clash_ix and any(i in reind for i in tsr.inds): + tsr = tsr.reindex(reind, inplace=virtual) + self.add_tensor(tsr, tid=tid, virtual=virtual) + + else: # directly add tensor/tag indexes + for tid, tsr in sorted_tensors: + self.add_tensor(tsr, tid=tid, virtual=virtual) + + def select(self, tags, which='all'): + tagged_tids = self._get_tids_from_tags(tags, which=which) + ts = [self.tensor_map[n] for n in tagged_tids] + tn = FermionTensorNetwork(ts, check_collisions=False, virtual=True) + tn.view_like_(self) + return tn + + def _pop_tensor(self, tid, remove_from_fs=False): + """Remove a tensor from this network, returning said tensor. + """ + # pop the tensor itself + t = self.tensor_map.pop(tid) + + # remove the tid from the tag and ind maps + self._unlink_tags(t.tags, tid) + self._unlink_inds(t.inds, tid) + + # remove this tensornetwork as an owner + t.remove_owner(self) + if remove_from_fs: + self.fermion_space.remove_tensor(tid) + t.remove_fermion_owner() + + return t + + _pop_tensor_ = functools.partialmethod(_pop_tensor, remove_from_fs=True) + + def partition_tensors(self, tags, inplace=False, which='any'): + tagged_tids = self._get_tids_from_tags(tags, which=which) + + # check if all tensors have been tagged + if len(tagged_tids) == self.num_tensors: + return None, self.tensor_map.values() + + # Copy untagged to new network, and pop tagged tensors from this + untagged_tn = self if inplace else self.copy(full=True) + tagged_ts = tuple(map(untagged_tn._pop_tensor, sorted(tagged_tids))) + + return untagged_tn, tagged_ts + + def partition(self, tags, which='any', inplace=False): + """Split this TN into two, based on which tensors have any or all of + ``tags``. Unlike ``partition_tensors``, both results are TNs which + inherit the structure of the initial TN. + + Parameters + ---------- + tags : sequence of str + The tags to split the network with. + which : {'any', 'all'} + Whether to split based on matching any or all of the tags. + inplace : bool + If True, actually remove the tagged tensors from self. + + Returns + ------- + untagged_tn, tagged_tn : (TensorNetwork, TensorNetwork) + The untagged and tagged tensor networs. + + See Also + -------- + partition_tensors, select, select_tensors + """ + tagged_tids = self._get_tids_from_tags(tags, which=which) + kws = {'check_collisions': False} + t1 = self if inplace else self.copy(full=True) + t2s = [t1._pop_tensor(tid) for tid in tagged_tids] + t2 = FermionTensorNetwork(t2s, virtual=True, **kws) + t2.view_like_(self) + return t1, t2 + + def contract_between(self, tags1, tags2, **contract_opts): + tid1, = self._get_tids_from_tags(tags1, which='all') + tid2, = self._get_tids_from_tags(tags2, which='all') + + # allow no-op for same tensor specified twice ('already contracted') + if tid1 == tid2: + return + T1 = self._pop_tensor(tid1) + T2 = self._pop_tensor(tid2) + T12 = tensor_contract(T1, T2, inplace=True, **contract_opts) + if isinstance(T12, (float, complex)): + return T12 + else: + self |= T12 + + def contract_ind(self, ind, **contract_opts): + """Contract tensors connected by ``ind``. 
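+
+        A minimal usage sketch (the index name ``'k0'`` is illustrative)::
+
+            tn.contract_ind('k0')   # tensors sharing 'k0' are merged in place
+
+        If the contraction yields a scalar it is returned rather than being
+        added back into the network.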
+ """ + tids = self._get_tids_from_inds(ind) + ts = [self._pop_tensor(tid) for tid in tids] + out = tensor_contract(*ts, inplace=True, **contract_opts) + if isinstance(out, (float, complex)): + return out + else: + self |= out + + def _compress_between_tids( + self, + tid1, + tid2, + canonize_distance=None, + canonize_opts=None, + equalize_norms=False, + **compress_opts + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + + if canonize_distance: + raise NotImplementedError + + tensor_compress_bond(Tl, Tr, **compress_opts) + + if equalize_norms: + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) + + def _canonize_between_tids( + self, + tid1, + tid2, + equalize_norms=False, + **canonize_opts, + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + tensor_canonize_bond(Tl, Tr, **canonize_opts) + + if equalize_norms: + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) + + # ----------------------- contracting the network ----------------------- # + def contract_tags(self, tags, inplace=False, which='any', **opts): + untagged_tn, tagged_ts = self.partition_tensors( + tags, inplace=inplace, which=which) + + if not tagged_ts: + raise ValueError("No tags were found - nothing to contract. " + "(Change this to a no-op maybe?)") + + contracted = tensor_contract(*tagged_ts, inplace=True, **opts) + + if untagged_tn is None: + return contracted + + untagged_tn.add_tensor(contracted, virtual=True) + return untagged_tn + + def contract(self, tags=..., inplace=False, **opts): + if tags is all: + return tensor_contract(*self, inplace=inplace, **opts) + + # this checks whether certain TN classes have a manually specified + # contraction pattern (e.g. 1D along the line) + if self._CONTRACT_STRUCTURED: + if (tags is ...) or isinstance(tags, slice): + return self.contract_structured(tags, inplace=inplace, **opts) + + # else just contract those tensors specified by tags. + return self.contract_tags(tags, inplace=inplace, **opts) + + contract_ = functools.partialmethod(contract, inplace=True) + + def __matmul__(self, other): + """Overload "@" to mean full contraction with another network. + """ + return FermionTensorNetwork((self, other)) ^ ... diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py new file mode 100644 index 00000000..8e8d6ede --- /dev/null +++ b/quimb/tensor/fermion_2d.py @@ -0,0 +1,1236 @@ +"""Classes and algorithms related to Fermionic 2D tensor networks. +""" +import re +import functools +from operator import add +from itertools import product +from collections import defaultdict + +from ..utils import check_opt, pairwise +from .tensor_core import ( + bonds, + rand_uuid, + oset, + tags_to_oset +) +from .tensor_2d import ( + Rotator2D, + TensorNetwork2D, + TensorNetwork2DVector, + TensorNetwork2DFlat, + TensorNetwork2DOperator, + PEPS, + PEPO, + is_lone_coo, + gen_long_range_path, + calc_plaquette_sizes, + calc_plaquette_map +) +from .fermion import ( + FermionTensor, + FermionTensorNetwork, + tensor_contract +) +from .block_gen import rand_all_blocks, ones_single_block + +INVERSE_CUTOFF = 1e-10 + +class FermionTensorNetwork2D(FermionTensorNetwork, TensorNetwork2D): + """A subclass of ``quimb.tensor.tensor_2d.TensorNetwork2D`` that overrides methods + that depend on ordering of the tensors. Reorder method is added to aid row/column-wise + operations. 
Environments are now computed as an entire FermionTensorNetwork so that the + plaquettes are placed correctly + + """ + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + ) + + def _compatible_2d(self, other): + """Check whether ``self`` and ``other`` are compatible 2D tensor + networks such that they can remain a 2D tensor network when combined. + """ + return ( + isinstance(other, FermionTensorNetwork2D) and + all(getattr(self, e) == getattr(other, e) + for e in FermionTensorNetwork2D._EXTRA_PROPS) + ) + + def __and__(self, other): + new = super().__and__(other) + if self._compatible_2d(other): + new.view_as_(FermionTensorNetwork2D, like=self) + return new + + def __or__(self, other): + new = super().__or__(other) + if self._compatible_2d(other): + new.view_as_(FermionTensorNetwork2D, like=self) + return new + + def flatten(self, fuse_multibonds=True, inplace=False): + raise NotImplementedError + + def reorder(self, direction, layer_tags=None, inplace=False): + r"""Reorder all tensors either row/column-wise + + If ``direction == 'row'`` then:: + + | | | | | | | + Row 0: ─●─>●─>●─>●─>●─>●─>●─ then Row 1 + | | | | | | | + Row 1: ─●─>●─>●─>●─>●─>●─>●─ then Row 2 + | | | | | | | + Row 2: ─●─>●─>●─>●─>●─>●─>●─ + | | | | | | | + + If ``direction == 'col'`` then:: + + v v v v v v v + ─●──●──●──●──●──●──●─ + v v v v v v v + ─●──●──●──●──●──●──●─ + v v v v v v v + ─●──●──●──●──●──●──●─ + v v v v v v v + + Parameters + ---------- + direction : {"row", "col"} + The direction to reorder the entire network + layer_tags : optional + The relative order within a single coordinate + inplace : bool, optional + Whether to perform the operation inplace + """ + Lx, Ly = self._Lx, self._Ly + tid_map = dict() + current_position = 0 + if direction == "row": + iterator = product(range(Lx), range(Ly)) + elif direction == "col": + iterator = product(range(Ly), range(Lx)) + else: + raise KeyError("direction not supported") + + for i, j in iterator: + x, y = (i, j) if direction=="row" else (j, i) + site_tag = self.site_tag(x, y) + tids = self._get_tids_from_tags(site_tag) + if len(tids) == 1: + tid, = tids + if tid not in tid_map: + tid_map[tid] = current_position + current_position +=1 + else: + if layer_tags is None: + _tags = [self.tensor_map[ix].tags for ix in tids] + _tmp_tags = _tags[0].copy() + for itag in _tags[1:]: + _tmp_tags &= itag + _layer_tags = sorted([list(i-_tmp_tags)[0] for i in _tags]) + else: + _layer_tags = layer_tags + for tag in _layer_tags: + tid, = self._get_tids_from_tags((site_tag, tag)) + if tid not in tid_map: + tid_map[tid] = current_position + current_position += 1 + return self._reorder_from_tid(tid_map, inplace) + + def _contract_boundary_full_bond( + self, + xrange, + yrange, + from_which, + max_bond, + cutoff=0.0, + method='eigh', + renorm=True, + optimize='auto-hq', + opposite_envs=None, + contract_boundary_opts=None, + ): + raise NotImplementedError + + def compute_environments( + self, + from_which, + xrange=None, + yrange=None, + max_bond=None, + *, + cutoff=1e-10, + canonize=True, + mode='mps', + layer_tags=None, + dense=False, + compress_opts=None, + envs=None, + **contract_boundary_opts + ): + """Compute the ``self.Lx`` 1D boundary tensor networks describing + the environments of rows and columns. 
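+        For example (a sketch; ``norm`` is assumed to be a
+        ``FermionTensorNetwork2D`` such as a sandwiched bra/ket network, and
+        the layer tags are placeholders)::
+
+            envs = norm.compute_bottom_environments(
+                max_bond=64, layer_tags=('KET', 'BRA'))
+            below_2 = envs['bottom', 2]   # boundary network below row 2
+            row_2 = envs['mid', 2]        # the original row itself
+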
The returned tensor network + also contains the original plaquettes + """ + direction = {"left": "col", + "right": "col", + "top": "row", + "bottom": "row"}[from_which] + tn = self.reorder(direction, layer_tags=layer_tags) + + r2d = Rotator2D(tn, xrange, yrange, from_which) + sweep, row_tag = r2d.vertical_sweep, r2d.row_tag + contract_boundary_fn = r2d.get_contract_boundary_fn() + + if envs is None: + envs = {} + + if mode == 'full-bond': + # set shared storage for opposite env contractions + contract_boundary_opts.setdefault('opposite_envs', {}) + + envs[from_which, sweep[0]] = FermionTensorNetwork([]) + first_row = row_tag(sweep[0]) + envs['mid', sweep[0]] = tn.select(first_row).copy() + if len(sweep)==1: + return envs + if dense: + tn ^= first_row + envs[from_which, sweep[1]] = tn.select(first_row).copy() + + for i in sweep[2:]: + iprevprev = i - 2 * sweep.step + iprev = i - sweep.step + envs['mid', iprev] = tn.select(row_tag(iprev)).copy() + if dense: + tn ^= (row_tag(iprevprev), row_tag(iprev)) + else: + contract_boundary_fn( + iprevprev, iprev, + max_bond=max_bond, + cutoff=cutoff, + mode=mode, + canonize=canonize, + layer_tags=layer_tags, + compress_opts=compress_opts, + **contract_boundary_opts, + ) + + envs[from_which, i] = tn.select(first_row).copy() + + return envs + + compute_bottom_environments = functools.partialmethod( + compute_environments, from_which='bottom') + + compute_top_environments = functools.partialmethod( + compute_environments, from_which='top') + + compute_left_environments = functools.partialmethod( + compute_environments, from_which='left') + + compute_right_environments = functools.partialmethod( + compute_environments, from_which='right') + + def _compute_plaquette_environments_row_first( + self, + x_bsz, + y_bsz, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, + second_dense=None, + row_envs=None, + **compute_environment_opts + ): + if second_dense is None: + second_dense = x_bsz < 2 + + # first we contract from either side to produce column environments + if row_envs is None: + row_envs = self.compute_row_environments( + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) + + # next we form vertical strips and contract from both top and bottom + # for each column + col_envs = dict() + for i in range(self.Lx - x_bsz + 1): + # + # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● + # ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ + # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┬ + # | | | | | | | | | | | | | | | | | | | | ┊ x_bsz + # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┴ + # ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ + # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● + # + row_i = FermionTensorNetwork(( + row_envs['bottom', i], + *[row_envs['mid', i+x] for x in range(x_bsz)], + row_envs['top', i + x_bsz - 1], + )).view_as_(FermionTensorNetwork2D, like=self) + # + # y_bsz + # <--> second_dense=True + # ●── ──● + # │ │ ╭── ──╮ + # ●── . . ──● │╭─ . . ─╮│ ┬ + # │ │ or ● ● ┊ x_bsz + # ●── . . ──● │╰─ . . 
─╯│ ┴ + # │ │ ╰── ──╯ + # ●── ──● + # 'left' 'right' 'left' 'right' + # + col_envs[i] = row_i.compute_col_environments( + xrange=(max(i - 1, 0), min(i + x_bsz, self.Lx - 1)), + max_bond=max_bond, cutoff=cutoff, + canonize=canonize, layer_tags=layer_tags, + dense=second_dense, **compute_environment_opts) + + # then range through all the possible plaquettes, selecting the correct + # boundary tensors from either the column or row environments + plaquette_envs = dict() + for i0, j0 in product(range(self.Lx - x_bsz + 1), + range(self.Ly - y_bsz + 1)): + + # we want to select bordering tensors from: + # + # L──A──A──R <- A from the row environments + # │ │ │ │ + # i0+1 L──●──●──R + # │ │ │ │ <- L, R from the column environments + # i0 L──●──●──R + # │ │ │ │ + # L──B──B──R <- B from the row environments + # + # j0 j0+1 + # + env_ij = FermionTensorNetwork(( + col_envs[i0]['left', j0], + *[col_envs[i0]['mid', ix] for ix in range(j0, j0+y_bsz)], + col_envs[i0]['right', j0 + y_bsz - 1] + ), check_collisions=False) + + plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij + + return plaquette_envs + + def _compute_plaquette_environments_col_first( + self, + x_bsz, + y_bsz, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, + second_dense=None, + col_envs=None, + **compute_environment_opts + ): + if second_dense is None: + second_dense = y_bsz < 2 + + # first we contract from either side to produce column environments + if col_envs is None: + col_envs = self.compute_col_environments( + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) + + # next we form vertical strips and contract from both top and bottom + # for each column + row_envs = dict() + for j in range(self.Ly - y_bsz + 1): + # + # y_bsz + # <--> + # + # ╭─╱o─╱o─╮ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o╱─o╱──● + # ┃╭─|o─|o─╮┃ + # ●──o╱─o╱──● + # + col_j = FermionTensorNetwork(( + col_envs['left', j], + *[col_envs['mid', j+jn] for jn in range(y_bsz)], + col_envs['right', j + y_bsz - 1], + )).view_as_(FermionTensorNetwork2D, like=self) + # + # y_bsz + # <--> second_dense=True + # ●──●──●──● ╭──●──╮ + # │ │ │ │ or │ ╱ ╲ │ 'top' + # . . . . ┬ + # ┊ x_bsz + # . . . . 
┴ + # │ │ │ │ or │ ╲ ╱ │ 'bottom' + # ●──●──●──● ╰──●──╯ + # + row_envs[j] = col_j.compute_row_environments( + yrange=(max(j - 1, 0), min(j + y_bsz, self.Ly - 1)), + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, dense=second_dense, + **compute_environment_opts) + + # then range through all the possible plaquettes, selecting the correct + # boundary tensors from either the column or row environments + plaquette_envs = dict() + for i0, j0 in product(range(self.Lx - x_bsz + 1), + range(self.Ly - y_bsz + 1)): + + # we want to select bordering tensors from: + # + # A──A──A──A <- A from the row environments + # │ │ │ │ + # i0+1 L──●──●──R + # │ │ │ │ <- L, R from the column environments + # i0 L──●──●──R + # │ │ │ │ + # B──B──B──B <- B from the row environments + # + # j0 j0+1 + # + env_ij = FermionTensorNetwork(( + row_envs[j0]['bottom', i0], + *[row_envs[j0]['mid', ix] for ix in range(i0, i0+x_bsz)], + row_envs[j0]['top', i0 + x_bsz - 1] + ), check_collisions=False) + + plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij + + return plaquette_envs + +def gate_string_split_(TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts): + # by default this means singuvalues are kept in the string 'blob' tensor + compress_opts.setdefault('absorb', 'right') + loc_info = dict([t.get_fermion_info() for t in original_ts]) + # the outer, neighboring indices of each tensor in the string + neighb_inds = [] + + # tensors we are going to contract in the blob, reindex some to attach gate + contract_ts = [] + fermion_info = [] + qpn_infos = [] + + for t, coo in zip(original_ts, string): + neighb_inds.append(tuple(ix for ix in t.inds if ix not in bonds_along)) + contract_ts.append(t.reindex_(reindex_map) if coo in where else t) + fermion_info.append(t.get_fermion_info()) + qpn_infos.append(t.data.dq) + + blob = tensor_contract(*contract_ts, TG, inplace=True) + regauged = [] + work_site = blob.get_fermion_info()[1] + fs = blob.fermion_owner[0] + + # one by one extract the site tensors again from each end + inner_ts = [None] * len(string) + i = 0 + j = len(string) - 1 + + while True: + lix = neighb_inds[i] + if i > 0: + lix += (bonds_along[i - 1],) + + # the original bond we are restoring + bix = bonds_along[i] + + # split the blob! + qpn_info = [blob.data.dq - qpn_infos[i], qpn_infos[i]] + lix = tuple(oset(blob.inds)-oset(lix)) + blob, *maybe_svals, inner_ts[i] = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, qpn_info=qpn_info, **compress_opts) + + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. for ``SimpleUpdate` + + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + #coo_pair = tuple(sorted((string[i], string[i + 1]))) + coo_pair = (string[i], string[i+1]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to unguage later + if i != j - 1: + blob.multiply_index_diagonal_(bix, s, location="back") + regauged.append((i + 1, bix, "back", s)) + + # move inwards along string, terminate if two ends meet + i += 1 + if i == j: + inner_ts[i] = blob + break + + # extract at end of string + lix = neighb_inds[j] + if j < len(string) - 1: + lix += (bonds_along[j],) + + # the original bond we are restoring + bix = bonds_along[j - 1] + + # split the blob! 
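+        # ``qpn_info`` divides the blob's total charge between the two
+        # factors: the extracted site tensor keeps its original net charge
+        # ``qpn_infos[j]`` while the remaining blob keeps the rest, so both
+        # pieces of the split stay in well-defined symmetry sectors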
+ qpn_info = [qpn_infos[j], blob.data.dq - qpn_infos[j]] + inner_ts[j], *maybe_svals, blob= blob.split( + left_inds=lix, get='tensors', bond_ind=bix, qpn_info=qpn_info, **compress_opts) + + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[j-1], string[j]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to ungauge later + if j != i + 1: + blob.multiply_index_diagonal_(bix, s, location="front") + regauged.append((j - 1, bix, "front", s)) + + # move inwards along string, terminate if two ends meet + j -= 1 + if j == i: + inner_ts[j] = blob + break + # SVD funcs needs to be modify and make sure S has even parity + for i, bix, location, s in regauged: + snew = inv_with_smudge(s, INVERSE_CUTOFF, gauge_smudge=0) + t = inner_ts[i] + t.multiply_index_diagonal_(bix, snew, location=location) + + revert_index_map = {v: k for k, v in reindex_map.items()} + for to, tn in zip(original_ts, inner_ts): + to.reindex_(revert_index_map) + tn.transpose_like_(to) + to.modify(data=tn.data) + + for i, (tid, _) in enumerate(fermion_info): + if i==0: + fs.replace_tensor(work_site, original_ts[i], tid=tid, virtual=True) + else: + fs.insert_tensor(work_site+i, original_ts[i], tid=tid, virtual=True) + + fs._reorder_from_dict(dict(fermion_info)) + +def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts): + compress_opts.setdefault('absorb', 'right') + + # indices to reduce, first and final include physical indices for gate + inds_to_reduce = [(bonds_along[0], site_ix[0])] + for b1, b2 in pairwise(bonds_along): + inds_to_reduce.append((b1, b2)) + inds_to_reduce.append((bonds_along[-1], site_ix[-1])) + + # tensors that remain on the string sites and those pulled into string + outer_ts, inner_ts = [], [] + fermion_info = [] + fs = TG.fermion_owner[0] + tid_lst = [] + for coo, rix, t in zip(string, inds_to_reduce, original_ts): + tq, tr = t.split(left_inds=None, right_inds=rix, + method='qr', get='tensors', absorb="right") + fermion_info.append(t.get_fermion_info()) + outer_ts.append(tq) + inner_ts.append(tr.reindex_(reindex_map) if coo in where else tr) + + for tq, tr, t in zip(outer_ts, inner_ts, original_ts): + isite = t.get_fermion_info()[1] + fs.replace_tensor(isite, tr, virtual=True) + fs.insert_tensor(isite+1, tq, virtual=True) + + blob = tensor_contract(*inner_ts, TG, inplace=True) + work_site = blob.get_fermion_info()[1] + regauged = [] + + # extract the new reduced tensors sequentially from each end + i = 0 + j = len(string) - 1 + + while True: + + # extract at beginning of string + lix = bonds(blob, outer_ts[i]) + if i == 0: + lix.add(site_ix[0]) + else: + lix.add(bonds_along[i - 1]) + + # the original bond we are restoring + bix = bonds_along[i] + + # split the blob! + lix = tuple(oset(blob.inds)-oset(lix)) + blob, *maybe_svals, inner_ts[i] = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. 
for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[i], string[i + 1]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to unguage later + if i != j - 1: + blob.multiply_index_diagonal_(bix, s, location="back") + regauged.append((i + 1, bix, "back", s)) + + # move inwards along string, terminate if two ends meet + i += 1 + if i == j: + inner_ts[i] = blob + break + + # extract at end of string + lix = bonds(blob, outer_ts[j]) + if j == len(string) - 1: + lix.add(site_ix[-1]) + else: + lix.add(bonds_along[j]) + + # the original bond we are restoring + bix = bonds_along[j - 1] + + # split the blob! + inner_ts[j], *maybe_svals, blob = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[j - 1], string[j]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to unguage later + if j != i + 1: + blob.multiply_index_diagonal_(bix, s, location="front") + regauged.append((j - 1, bix, "front", s)) + + # move inwards along string, terminate if two ends meet + j -= 1 + if j == i: + inner_ts[j] = blob + break + + for i, (tid, _) in enumerate(fermion_info): + if i==0: + fs.replace_tensor(work_site, inner_ts[i], tid=tid, virtual=True) + else: + fs.insert_tensor(work_site+i, inner_ts[i], tid=tid, virtual=True) + new_ts = [ + tensor_contract(ts, tr, inplace=True).transpose_like_(to) + for to, ts, tr in zip(original_ts, outer_ts, inner_ts) + ] + + for i, bix, location, s in regauged: + snew = inv_with_smudge(s, INVERSE_CUTOFF, gauge_smudge=0) + t = new_ts[i] + t.multiply_index_diagonal_(bix, snew, location=location) + + for (tid, _), to, t in zip(fermion_info, original_ts, new_ts): + site = t.get_fermion_info()[1] + to.modify(data=t.data) + fs.replace_tensor(site, to, tid=tid, virtual=True) + + fs._reorder_from_dict(dict(fermion_info)) + + +class FermionTensorNetwork2DVector(FermionTensorNetwork2D, + FermionTensorNetwork, + TensorNetwork2DVector): + """Mixin class for a 2D square lattice vector TN, i.e. one with a single + physical index per site. + """ + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_site_ind_id', + ) + + def to_dense(self, *inds_seq, **contract_opts): + raise NotImplementedError + + def make_norm( + self, + mangle_append='*', + layer_tags=('KET', 'BRA'), + return_all=False, + ): + ket = self.copy() + ket.add_tag(layer_tags[0]) + + bra = ket.retag({layer_tags[0]: layer_tags[1]}) + bra = bra.H + if mangle_append: + bra.mangle_inner_(mangle_append) + norm = ket & bra + + if return_all: + return norm, ket, bra + return norm + + def gate( + self, + G, + where, + contract=False, + tags=None, + inplace=False, + info=None, + long_range_use_swaps=False, + long_range_path_sequence=None, + **compress_opts + ): + check_opt("contract", contract, (False, True, 'split', 'reduce-split')) + + psi = self if inplace else self.copy() + + if is_lone_coo(where): + where = (where,) + else: + where = tuple(where) + ng = len(where) + + tags = tags_to_oset(tags) + + # allow a matrix to be reshaped into a tensor if it factorizes + # i.e. 
(4, 4) assumed to be two qubit gate -> (2, 2, 2, 2) + + site_ix = [psi.site_ind(i, j) for i, j in where] + # new indices to join old physical sites to new gate + bnds = [rand_uuid() for _ in range(ng)] + reindex_map = dict(zip(site_ix, bnds)) + + TG = FermionTensor(G.copy(), inds=site_ix+bnds, tags=tags, left_inds=site_ix) # [bnds first, then site_ix] + + if contract is False: + # + # │ │ <- site_ix + # GGGGG + # │╱ │╱ <- bnds + # ──●───●── + # ╱ ╱ + # + psi.reindex_(reindex_map) + psi |= TG + return psi + + elif (contract is True) or (ng == 1): + # + # │╱ │╱ + # ──GGGGG── + # ╱ ╱ + # + psi.reindex_(reindex_map) + input_tids = psi._get_tids_from_inds(bnds, which='any') + isite = [psi.tensor_map[itid].get_fermion_info()[1] for itid in input_tids] + + psi.fermion_space.add_tensor(TG, virtual=True) + + # get the sites that used to have the physical indices + site_tids = psi._get_tids_from_inds(bnds, which='any') + + # pop the sites, contract, then re-add + pts = [psi._pop_tensor(tid) for tid in site_tids] + out = tensor_contract(*pts, TG, inplace=True) + psi |= out + psi.fermion_space.move(out.get_fermion_info()[0], min(isite)) + return psi + + # following are all based on splitting tensors to maintain structure + ij_a, ij_b = where + + # parse the argument specifying how to find the path between + # non-nearest neighbours + if long_range_path_sequence is not None: + # make sure we can index + long_range_path_sequence = tuple(long_range_path_sequence) + # if the first element is a str specifying move sequence, e.g. + # ('v', 'h') + # ('av', 'bv', 'ah', 'bh') # using swaps + manual_lr_path = not isinstance(long_range_path_sequence[0], str) + # otherwise assume a path has been manually specified, e.g. + # ((1, 2), (2, 2), (2, 3), ... ) + # (((1, 1), (1, 2)), ((4, 3), (3, 3)), ...) 
# using swaps + else: + manual_lr_path = False + + psi.fermion_space.add_tensor(TG, virtual=True) + # check if we are not nearest neighbour and need to swap first + if long_range_use_swaps: + raise NotImplementedError + + string = tuple(gen_long_range_path( + *where, sequence=long_range_path_sequence)) + + # the tensors along this string, which will be updated + original_ts = [psi[coo] for coo in string] + + # the len(string) - 1 indices connecting the string + bonds_along = [next(iter(bonds(t1, t2))) + for t1, t2 in pairwise(original_ts)] + + if contract == 'split': + # + # │╱ │╱ │╱ │╱ + # ──GGGGG── ==> ──G┄┄┄G── + # ╱ ╱ ╱ ╱ + # + gate_string_split_( + TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts) + + elif contract == 'reduce-split': + # + # │ │ │ │ + # GGGGG GGG │ │ + # │╱ │╱ ==> ╱│ │ ╱ ==> ╱│ │ ╱ │╱ │╱ + # ──●───●── ──>─●─●─<── ──>─GGG─<── ==> ──G┄┄┄G── + # ╱ ╱ ╱ ╱ ╱ ╱ ╱ ╱ + # + # + gate_string_reduce_split_( + TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts) + return psi + + gate_ = functools.partialmethod(gate, inplace=True) + + def compute_local_expectation( + self, + terms, + max_bond=None, + *, + cutoff=1e-10, + canonize=True, + mode='mps', + layer_tags=('KET', 'BRA'), + normalized=False, + autogroup=True, + contract_optimize='auto-hq', + return_all=False, + plaquette_envs=None, + plaquette_map=None, + **plaquette_env_options, + ): + norm, ket, bra = self.make_norm(return_all=True, layer_tags=layer_tags) + plaquette_env_options["max_bond"] = max_bond + plaquette_env_options["cutoff"] = cutoff + plaquette_env_options["canonize"] = canonize + plaquette_env_options["mode"] = mode + plaquette_env_options["layer_tags"] = layer_tags + + # factorize both local and global phase on the operator tensors + new_terms = dict() + for where, op in terms.items(): + if is_lone_coo(where): + _where = (where,) + else: + _where = tuple(where) + ng = len(_where) + site_ix = [bra.site_ind(i, j) for i, j in _where] + bnds = [rand_uuid() for _ in range(ng)] + TG = FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) + new_terms[where] = bra.fermion_space.move_past(TG).data + + if plaquette_envs is None: + plaquette_envs = dict() + for x_bsz, y_bsz in calc_plaquette_sizes(terms.keys(), autogroup): + plaquette_envs.update(norm.compute_plaquette_environments( + x_bsz=x_bsz, y_bsz=y_bsz, **plaquette_env_options)) + + if plaquette_map is None: + # work out which plaquettes to use for which terms + plaquette_map = calc_plaquette_map(plaquette_envs) + + # now group the terms into just the plaquettes we need + plaq2coo = defaultdict(list) + for where, G in new_terms.items(): + p = plaquette_map[where] + plaq2coo[p].append((where, G)) + + expecs = dict() + for p in plaq2coo: + # site tags for the plaquette + tn = plaquette_envs[p] + if normalized: + norm_i0j0 = tn.contract(all, optimize=contract_optimize) + else: + norm_i0j0 = None + + for where, G in plaq2coo[p]: + newtn = tn.copy() + if is_lone_coo(where): + _where = (where,) + else: + _where = tuple(where) + ng = len(_where) + site_ix = [bra.site_ind(i, j) for i, j in _where] + bnds = [rand_uuid() for _ in range(ng)] + reindex_map = dict(zip(site_ix, bnds)) + TG = FermionTensor(G.copy(), inds=site_ix+bnds, left_inds=site_ix) + ntsr = len(newtn.tensor_map) + fs = newtn.fermion_space + tids = newtn._get_tids_from_inds(site_ix, which='any') + for tid_ in tids: + tsr = newtn.tensor_map[tid_] + if layer_tags[0] in tsr.tags: + tsr.reindex_(reindex_map) + 
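+                # only the KET-layer tensors were reindexed above, so ``TG``
+                # (inds = site_ix + bnds) bridges the untouched BRA physical
+                # indices and the relabelled KET indices; contracting the
+                # patched plaquette below gives the local expectation of this
+                # term.  A minimal usage sketch, assuming an FPEPS ``psi`` and
+                # a ``terms`` dict mapping site coordinate(s) to operator
+                # blocks:
+                #     expecs = psi.compute_local_expectation(
+                #         terms, max_bond=12, normalized=True, return_all=True)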
newtn.add_tensor(TG, virtual=True) + expec_ij = newtn.contract(all, optimize=contract_optimize) + expecs[where] = expec_ij, norm_i0j0 + + if return_all: + return expecs + + if normalized: + return functools.reduce(add, (e / n for e, n in expecs.values())) + + return functools.reduce(add, (e for e, _ in expecs.values())) + +class FermionTensorNetwork2DOperator(FermionTensorNetwork2D, + FermionTensorNetwork, + TensorNetwork2DOperator): + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_upper_ind_id', + '_lower_ind_id', + ) + + def to_dense(self, *inds_seq, **contract_opts): + raise NotImplementedError + + +class FermionTensorNetwork2DFlat(FermionTensorNetwork2D, + FermionTensorNetwork, + TensorNetwork2DFlat): + """Mixin class for a 2D square lattice tensor network with a single tensor + per site, for example, both PEPS and PEPOs. + """ + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + ) + + def expand_bond_dimension(self, new_bond_dim, inplace=True, bra=None, + rand_strength=0.0): + raise NotImplementedError + + +class FPEPS(FermionTensorNetwork2DVector, + FermionTensorNetwork2DFlat, + FermionTensorNetwork2D, + FermionTensorNetwork, + PEPS): + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_site_ind_id', + ) + + def __init__(self, arrays, *, shape='urdlp', tags=None, + site_ind_id='k{},{}', site_tag_id='I{},{}', + row_tag_id='ROW{}', col_tag_id='COL{}', **tn_opts): + + if isinstance(arrays, FPEPS): + super().__init__(arrays) + return + + tags = tags_to_oset(tags) + self._site_ind_id = site_ind_id + self._site_tag_id = site_tag_id + self._row_tag_id = row_tag_id + self._col_tag_id = col_tag_id + + arrays = tuple(tuple(x for x in xs) for xs in arrays) + self._Lx = len(arrays) + self._Ly = len(arrays[0]) + tensors = [] + + # cache for both creating and retrieving indices + ix = defaultdict(rand_uuid) + + for i, j in product(range(self.Lx), range(self.Ly)): + array = arrays[i][j] + + # figure out if we need to transpose the arrays from some order + # other than up right down left physical + array_order = shape + if i == self.Lx - 1: + array_order = array_order.replace('u', '') + if j == self.Ly - 1: + array_order = array_order.replace('r', '') + if i == 0: + array_order = array_order.replace('d', '') + if j == 0: + array_order = array_order.replace('l', '') + + # allow convention of missing bonds to be singlet dimensions + if len(array.shape) != len(array_order): + raise TypeError("input array does not ahve right shape of (Lx, Ly)") + + transpose_order = tuple( + array_order.find(x) for x in 'urdlp' if x in array_order + ) + if transpose_order != tuple(range(len(array_order))): + array = np.transpose(array, transpose_order) + + # get the relevant indices corresponding to neighbours + inds = [] + if 'u' in array_order: + inds.append(ix[(i + 1, j), (i, j)]) + if 'r' in array_order: + inds.append(ix[(i, j), (i, j + 1)]) + if 'd' in array_order: + inds.append(ix[(i, j), (i - 1, j)]) + if 'l' in array_order: + inds.append(ix[(i, j - 1), (i, j)]) + inds.append(self.site_ind(i, j)) + + # mix site, row, column and global tags + + ij_tags = tags | oset((self.site_tag(i, j), + self.row_tag(i), + self.col_tag(j))) + + # create the site tensor! 
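+            # for an interior site the index order is (up, right, down, left,
+            # physical); boundary sites simply drop the missing virtual bonds,
+            # e.g. site (0, 0) only carries (up, right, physical).  Each
+            # virtual bond is keyed in ``ix`` by the ordered pair of sites it
+            # joins, so both neighbours retrieve the same random index name.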
+ tensors.append(FermionTensor(data=array, inds=inds, tags=ij_tags)) + + super().__init__(tensors, virtual=True, **tn_opts) + + @classmethod + def rand(cls, Lx, Ly, bond_dim, symmetry_infos, dq_infos, + phys_dim=2, seed=None, dtype=float, **peps_opts): + r'''Construct a random 2d FPEPS with given quantum particle number distribution + + Parameters + ---------- + Lx : int + The number of rows. + Ly : int + The number of columns. + bond_dim: int + Virtual bond dimension for each virtual block + symmetry_infos : dict[tuple[int], list/tuple] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers in each dimension ordered by up, right, down, left and physical, + which will be supplied to ``rand_all_blocks`` + dq_infos: dict[tuple[ix], int or tuple/list of integers] + A dictionary mapping the site coordinates to the net quantum particle numbers + on that site, which will be supplied to ``rand_all_blocks`` + phys_dim: int + Physical bond dimension for each physical block + seed : int, optional + A random seed. + dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional + The underlying data type. + pepes_opts + Supplied to :class:`~quimb.tensor.fermion_2d.FPEPS`. + + Returns + ------- + FPEPS + ''' + if seed is not None: + np.random.seed(seed) + pattern_map = {"d": "+", "l":"+", "p":"+", + "u": "-", "r":"-"} + + arrays = [[None for _ in range(Ly)] for _ in range(Lx)] + for i, j in product(range(Lx), range(Ly)): + shape = [] + pattern = "" + if i != Lx - 1: # bond up + shape.append(bond_dim) + pattern += pattern_map['u'] + if j != Ly - 1: # bond right + shape.append(bond_dim) + pattern += pattern_map['r'] + if i != 0: # bond down + shape.append(bond_dim) + pattern += pattern_map['d'] + if j != 0: # bond left + shape.append(bond_dim) + pattern += pattern_map['l'] + shape.append(phys_dim) + pattern += pattern_map['p'] + symmetry_info = symmetry_infos[i, j] + arrays[i][j] = rand_all_blocks(shape, symmetry_info, dtype=dtype, pattern=pattern, dq=dq_infos[i, j]) + return FPEPS(arrays, **peps_opts) + + @classmethod + def gen_site_prod_state(cls, Lx, Ly, phys_infos, phys_dim=1, **peps_opts): + r'''Construct a 2d FPEPS as site product state + + Parameters + ---------- + Lx : int + The number of rows. + Ly : int + The number of columns. + phys_infos: dict[tuple[int], int or tuple/list] + A dictionary mapping the site coordinates to the specified single quantum + particle state + phys_dim: int + Physical bond dimension for the physical block + pepes_opts + Supplied to :class:`~quimb.tensor.fermion_2d.FPEPS`. 
+ + Returns + ------- + FPEPS + ''' + pattern_map = {"d": "+", "l":"+", "p":"+", + "u": "-", "r":"-"} + arrays = [[None for _ in range(Ly)] for _ in range(Lx)] + for i, j in product(range(Lx), range(Ly)): + shape = [] + pattern = "" + if i != Lx - 1: # bond up + shape.append(1) + pattern += pattern_map['u'] + if j != Ly - 1: # bond right + shape.append(1) + pattern += pattern_map['r'] + if i != 0: # bond down + shape.append(1) + pattern += pattern_map['d'] + if j != 0: # bond left + shape.append(1) + pattern += pattern_map['l'] + shape.append(phys_dim) + pattern += pattern_map['p'] + arrays[i][j] = ones_single_block(shape, pattern, phys_infos[i, j], ind=len(shape)-1) + return FPEPS(arrays, **peps_opts) + + def add_PEPS(self, other, inplace=False): + raise NotImplementedError + +class FPEPO(FermionTensorNetwork2DOperator, + FermionTensorNetwork2DFlat, + FermionTensorNetwork2D, + FermionTensorNetwork, + PEPO): + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_upper_ind_id', + '_lower_ind_id', + ) + + def __init__(self, arrays, *, shape='urdlbk', tags=None, + upper_ind_id='k{},{}', lower_ind_id='b{},{}', + site_tag_id='I{},{}', row_tag_id='ROW{}', col_tag_id='COL{}', + **tn_opts): + + if isinstance(arrays, FPEPO): + super().__init__(arrays) + return + + tags = tags_to_oset(tags) + self._upper_ind_id = upper_ind_id + self._lower_ind_id = lower_ind_id + self._site_tag_id = site_tag_id + self._row_tag_id = row_tag_id + self._col_tag_id = col_tag_id + + arrays = tuple(tuple(x for x in xs) for xs in arrays) + self._Lx = len(arrays) + self._Ly = len(arrays[0]) + tensors = [] + + # cache for both creating and retrieving indices + ix = defaultdict(rand_uuid) + + for i, j in product(range(self.Lx), range(self.Ly)): + array = arrays[i][j] + + # figure out if we need to transpose the arrays from some order + # other than up right down left physical + array_order = shape + if i == self.Lx - 1: + array_order = array_order.replace('u', '') + if j == self.Ly - 1: + array_order = array_order.replace('r', '') + if i == 0: + array_order = array_order.replace('d', '') + if j == 0: + array_order = array_order.replace('l', '') + + # allow convention of missing bonds to be singlet dimensions + if len(array.shape) != len(array_order): + raise ValueError("Input arrays do not have right shape (Lx, Ly)") + + transpose_order = tuple( + array_order.find(x) for x in 'urdlbk' if x in array_order + ) + if transpose_order != tuple(range(len(array_order))): + array = np.transpose(array, transpose_order) + + # get the relevant indices corresponding to neighbours + inds = [] + if 'u' in array_order: + inds.append(ix[(i + 1, j), (i, j)]) + if 'r' in array_order: + inds.append(ix[(i, j), (i, j + 1)]) + if 'd' in array_order: + inds.append(ix[(i, j), (i - 1, j)]) + if 'l' in array_order: + inds.append(ix[(i, j - 1), (i, j)]) + inds.append(self.lower_ind(i, j)) + inds.append(self.upper_ind(i, j)) + + # mix site, row, column and global tags + ij_tags = tags | oset((self.site_tag(i, j), + self.row_tag(i), + self.col_tag(j))) + + # create the site tensor! + tensors.append(FermionTensor(data=array, inds=inds, tags=ij_tags)) + + super().__init__(tensors, virtual=True, **tn_opts) + + @classmethod + def rand(cls, Lx, Ly, bond_dim, phys_dim=2, herm=False, + dtype=float, seed=None, **pepo_opts): + raise NotImplementedError + + def add_PEPO(self, other, inplace=False): + """Add this PEPO with another. 
+ """ + raise NotImplementedError diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py new file mode 100644 index 00000000..0e503bf8 --- /dev/null +++ b/quimb/tensor/fermion_2d_tebd.py @@ -0,0 +1,456 @@ +import numpy as np +import random +import collections +from itertools import product +from ..utils import pairwise +from .tensor_2d_tebd import SimpleUpdate as _SimpleUpdate +from .tensor_2d_tebd import conditioner +from .tensor_2d import gen_long_range_path, nearest_neighbors +from .block_interface import eye, to_exponential, Hubbard +from . import block_tools + +INVERSE_CUTOFF = 1e-10 + +def Hubbard2D(t, u, Lx, Ly, mu=0., symmetry=None): + """Create a LocalHam2D object for 2D Hubbard Model + + Parameters + ---------- + t : scalar + The hopping parameter + u : scalar + Onsite columb repulsion + Lx: int + Size in x direction + Ly: int + Size in y direction + mu: scalar, optional + Chemical potential + symmetry: {"z2",'u1', 'z22', 'u11'}, optional + Symmetry in the backend + + Returns + ------- + a LocalHam2D object + """ + ham = dict() + count_neighbour = lambda i,j: (i>0) + (i0) + (j b) + T1_L, T1_R = T1.split(left_inds=left_env_ix, right_inds=shared_ix, absorb="right", + get='tensors', method='qr') + T2_L, T2_R = T2.split(left_inds=shared_ix, absorb="left", get='tensors', method='qr') + # b) -> c) + M = (T1_R @ T2_L) + M.drop_tags() + # c) -> d) + M_L, *s, M_R = M.split(left_inds=T1_L.bonds(M), get='tensors', + absorb=absorb, **compress_opts) + + # make sure old bond being used + ns_ix, = M_L.bonds(M_R) + M_L.reindex_({ns_ix: shared_ix[0]}) + M_R.reindex_({ns_ix: shared_ix[0]}) + + # d) -> e) + T1C = T1_L.contract(M_L) + T2C = M_R.contract(T2_R) + else: + T12 = T1 @ T2 + T1C, *s, T2C = T12.split(left_inds=left_env_ix, get='tensors', + absorb=absorb, **compress_opts) + T1C.transpose_like_(T1) + T2C.transpose_like_(T2) + + # update with the new compressed data + T1.modify(data=T1C.data, inds=T1C.inds) + T2.modify(data=T2C.data, inds=T2C.inds) + + if s and info is not None: + info['singular_values'], = s + + +def tensor_balance_bond(t1, t2, smudge=1e-6): + ix, = t1.bonds(t2) + t1H = t1.H.reindex_({ix: ix+'*'}) + t2H = t2.H.reindex_({ix: ix+'*'}) + out1 = _core_contract(t1H, t1) + out2 = _core_contract(t2H, t2) + s1, s2 = get_smudge_balance(out1, out2, ix, smudge) + t1.multiply_index_diagonal_(ix, s1, location="back") + t2.multiply_index_diagonal_(ix, s2, location="front") + +# --------------------------------------------------------------------------- # +# Tensor Class # +# --------------------------------------------------------------------------- # + +class BlockTensor(Tensor): + + __slots__ = ('_data', '_inds', '_tags', '_left_inds', '_owners') + + def _apply_function(self, fn): + self._data = apply(self.data, fn) + + def expand_ind(self, ind, size): + raise NotImplementedError + + def new_ind(self, name, size=1, axis=0): + raise NotImplementedError + + @property + def shape(self): + """Return the "inflated" shape composed of maximal size for each leg + """ + return self.data.shape + + def astype(self, dtype, inplace=False): + raise NotImplementedError + + def ind_size(self, ind): + ax = self.inds.index(ind) + return self.data.get_bond_info(ax) + + def conj(self, inplace=False): + """Conjugate this tensors data (does nothing to indices). 
+ """ + t = self if inplace else self.copy() + t.modify(data=t.data.conj()) + return t + + conj_ = functools.partialmethod(conj, inplace=True) + + @property + def H(self): + t = self.copy() + t.modify(data=t.data.dagger, inds=t.inds[::-1]) + return t + + def transpose(self, *output_inds, inplace=False): + t = self if inplace else self.copy() + + output_inds = tuple(output_inds) # need to re-use this. + + if set(t.inds) != set(output_inds): + raise ValueError("'output_inds' must be permutation of the current" + f" tensor indices, but {set(t.inds)} != " + f"{set(output_inds)}") + + current_ind_map = {ind: i for i, ind in enumerate(t.inds)} + out_shape = tuple(current_ind_map[i] for i in output_inds) + + t.modify(data=np.transpose(t.data, out_shape), inds=output_inds) + return t + + transpose_ = functools.partialmethod(transpose, inplace=True) + + def trace(self, ind1, ind2, inplace=False): + raise NotImplementedError + + def sum_reduce(self, ind, inplace=False): + raise NotImplementedError + + def collapse_repeated(self, inplace=False): + raise NotImplementedError + + def contract(self, *others, output_inds=None, **opts): + return tensor_contract(self, *others, output_inds=output_inds, **opts) + + def direct_product(self, other, sum_inds=(), inplace=False): + raise NotImplementedError + + def split(self, *args, **kwargs): + return tensor_split(self, *args, **kwargs) + + def distance(self, other, **contract_opts): + raise NotImplementedError + + def entropy(self, left_inds, method='svd'): + raise NotImplementedError + + def fuse(self, fuse_map, inplace=False): + raise NotImplementedError + + def unfuse(self, unfuse_map, shape_map, inplace=False): + raise NotImplementedError + + def to_dense(self, *inds_seq, to_qarray=True): + raise NotImplementedError + + def squeeze(self, include=None, inplace=False): + raise NotImplementedError + + def norm(self): + """Frobenius norm of this tensor. + """ + return self.data.norm() + + def symmetrize(self, ind1, ind2, inplace=False): + raise NotImplementedError + + def unitize(self, left_inds=None, inplace=False, method='qr'): + raise NotImplementedError + + def randomize(self, dtype=None, inplace=False, **randn_opts): + raise NotImplementedError + + def flip(self, ind, inplace=False): + raise NotImplementedError + + def multiply_index_diagonal(self, ind, x, inplace=False, location="front"): + if location not in ["front", "back"]: + raise ValueError("invalid for the location of the diagonal") + t = self if inplace else self.copy() + ax = t.inds.index(ind) + if isinstance(x, Tensor): + x = x.data + if location=="front": + out = np.tensordot(x, t.data, axes=((1,), (ax,))) + transpose_order = list(range(1, ax+1)) + [0] + list(range(ax+1, t.ndim)) + else: + out = np.tensordot(t.data, x, axes=((ax,),(0,))) + transpose_order = list(range(ax)) + [t.ndim-1] + list(range(ax, t.ndim-1)) + data = np.transpose(out, transpose_order) + t.modify(data=data) + return t + + multiply_index_diagonal_ = functools.partialmethod( + multiply_index_diagonal, inplace=True) + + def almost_equals(self, other, **kwargs): + raise NotImplementedError + + def __and__(self, other): + """Combine with another ``Tensor`` or ``TensorNetwork`` into a new + ``TensorNetwork``. + """ + return BlockTensorNetwork((self, other)) + + def __or__(self, other): + """Combine virtually (no copies made) with another ``Tensor`` or + ``TensorNetwork`` into a new ``TensorNetwork``. 
+ """ + return BlockTensorNetwork((self, other), virtual=True) + + _EXTRA_PROPS = () + + def draw(self, *args, **kwargs): + """Plot a graph of this tensor and its indices. + """ + draw_tn(BlockTensorNetwork((self,)), *args, **kwargs) + + graph = draw + +# --------------------------------------------------------------------------- # +# Tensor Network Class # +# --------------------------------------------------------------------------- # + +class BlockTensorNetwork(TensorNetwork): + + __slots__ = ('_inner_inds', '_outer_inds', '_tid_counter') + _EXTRA_PROPS = () + _CONTRACT_STRUCTURED = False + + def replace_with_identity(self, where, which='any', inplace=False): + raise NotImplementedError + + def replace_with_svd(self, where, left_inds, eps, *, which='any', + right_inds=None, method='isvd', max_bond=None, + absorb='both', cutoff_mode='rel', renorm=None, + ltags=None, rtags=None, keep_tags=True, + start=None, stop=None, inplace=False): + raise NotImplementedError + + def replace_section_with_svd(self, start, stop, eps, + **replace_with_svd_opts): + raise NotImplementedError + + def convert_to_zero(self): + raise NotImplementedError + + def contract_between(self, tags1, tags2, **contract_opts): + tid1, = self._get_tids_from_tags(tags1, which='all') + tid2, = self._get_tids_from_tags(tags2, which='all') + + # allow no-op for same tensor specified twice ('already contracted') + if tid1 == tid2: + return + + T1 = self._pop_tensor(tid1) + T2 = self._pop_tensor(tid2) + T12 = tensor_contract(T1, T2, **contract_opts) + self.add_tensor(T12, tid=tid2, virtual=True) + + def contract_ind(self, ind, **contract_opts): + """Contract tensors connected by ``ind``. + """ + tids = self._get_tids_from_inds(ind) + ts = [self._pop_tensor(tid) for tid in tids] + self |= tensor_contract(*ts, **contract_opts) + + def _compress_between_tids( + self, + tid1, + tid2, + canonize_distance=None, + canonize_opts=None, + equalize_norms=False, + **compress_opts + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + + if canonize_distance: + raise NotImplementedError + + tensor_compress_bond(Tl, Tr, **compress_opts) + + if equalize_norms: + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) + + def _canonize_between_tids( + self, + tid1, + tid2, + equalize_norms=False, + **canonize_opts, + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + tensor_canonize_bond(Tl, Tr, **canonize_opts) + + if equalize_norms: + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) + + def new_bond(self, tags1, tags2, **opts): + raise NotImplementedError + + def insert_gauge(self, U, where1, where2, Uinv=None, tol=1e-10): + raise NotImplementedError + + # ----------------------- contracting the network ----------------------- # + def contract_tags(self, tags, inplace=False, which='any', **opts): + """Contract the tensors that match any or all of ``tags``. + + Parameters + ---------- + tags : sequence of str + The list of tags to filter the tensors by. Use ``...`` + (``Ellipsis``) to contract all. + inplace : bool, optional + Whether to perform the contraction inplace. + which : {'all', 'any'} + Whether to require matching all or any of the tags. + + Returns + ------- + TensorNetwork, Tensor or scalar + The result of the contraction, still a ``TensorNetwork`` if the + contraction was only partial. 
+ + See Also + -------- + contract, contract_cumulative, contract_structured + """ + untagged_tn, tagged_ts = self.partition_tensors( + tags, inplace=inplace, which=which) + + if not tagged_ts: + raise ValueError("No tags were found - nothing to contract. " + "(Change this to a no-op maybe?)") + + contracted = tensor_contract(*tagged_ts, **opts) + + if untagged_tn is None: + return contracted + + untagged_tn.add_tensor(contracted, virtual=True) + return untagged_tn + + def contract(self, tags=..., inplace=False, **opts): + if tags is all: + return tensor_contract(*self, **opts) + + # this checks whether certain TN classes have a manually specified + # contraction pattern (e.g. 1D along the line) + if self._CONTRACT_STRUCTURED: + if (tags is ...) or isinstance(tags, slice): + return self.contract_structured(tags, inplace=inplace, **opts) + + # else just contract those tensors specified by tags. + return self.contract_tags(tags, inplace=inplace, **opts) + + contract_ = functools.partialmethod(contract, inplace=True) + + def __matmul__(self, other): + """Overload "@" to mean full contraction with another network. + """ + return BlockTensorNetwork((self, other)) ^ ... + + def aslinearoperator(self, left_inds, right_inds, ldims=None, rdims=None, + backend=None, optimize='auto'): + raise NotImplementedError + + def to_dense(self, *inds_seq, to_qarray=True, **contract_opts): + raise NotImplementedError + + def distance(self, *args, **kwargs): + raise NotImplementedError + + def fit( + self, + tn_target, + method='als', + tol=1e-9, + inplace=False, + progbar=False, + **fitting_opts + ): + raise NotImplementedError + + # --------------- information about indices and dimensions -------------- # + + def squeeze(self, fuse=False, inplace=False): + raise NotImplementedError + + def unitize(self, mode='error', inplace=False, method='qr'): + raise NotImplementedError + + def balance_bonds(self, inplace=False): + tn = self if inplace else self.copy() + + for ix, tids in tn.ind_map.items(): + if len(tids) != 2: + continue + tid1, tid2 = tids + t1, t2 = [tn.tensor_map[x] for x in (tid1, tid2)] + tensor_balance_bond(t1, t2) + + return tn + + balance_bonds_ = functools.partialmethod(balance_bonds, inplace=True) + + def fuse_multibonds(self, inplace=False): + raise NotImplementedError + + def rank_simplify( + self, + output_inds=None, + equalize_norms=False, + cache=None, + inplace=False, + ): + raise NotImplementedError + + def diagonal_reduce( + self, + output_inds=None, + atol=1e-12, + cache=None, + inplace=False, + ): + raise NotImplementedError diff --git a/quimb/tensor/tensor_core.py b/quimb/tensor/tensor_core.py index bf4c0906..a86cdcbb 100644 --- a/quimb/tensor/tensor_core.py +++ b/quimb/tensor/tensor_core.py @@ -1656,7 +1656,7 @@ def copy(self, deep=False, virtual=False): tensor network, this simply returns ``self``. """ if not (deep or virtual): - return Tensor(self, None) + return self.__class__(self, None) if deep and virtual: raise ValueError("Copy can't be both deep and virtual.") @@ -2840,8 +2840,6 @@ def __or__(self, other): """ return TensorNetwork((self, other), virtual=True) - _EXTRA_PROPS = () - @classmethod def from_TN(cls, tn, like=None, inplace=False, **kwargs): """Construct a specific tensor network subclass (i.e. 
one with some @@ -5565,7 +5563,7 @@ def insert_operator(self, A, where1, where2, tags=None, inplace=False): # reindex one tensor, and add a new A tensor joining the bonds nbnd = rand_uuid() T2.reindex_({bnd: nbnd}) - TA = Tensor(A, inds=(bnd, nbnd), tags=tags) + TA = A.__class__(A, inds=(bnd, nbnd), tags=tags) tn |= TA return tn diff --git a/quimb/tensor/test/test_block_numerics.py b/quimb/tensor/test/test_block_numerics.py new file mode 100644 index 00000000..fc744301 --- /dev/null +++ b/quimb/tensor/test/test_block_numerics.py @@ -0,0 +1,212 @@ +import pytest +import numpy as np +from quimb.tensor.tensor_block import ( + BlockTensor, BlockTensorNetwork, tensor_contract) +from quimb.tensor.block_gen import rand_all_blocks as rand +from quimb.tensor.block_interface import set_options + +@pytest.fixture(scope='class') +def u11setup(request): + bond = [(0,0), (1,1), (1,-1), (2,0)] + set_options(symmetry="u11", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(1,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(-1,-1)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,-1)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(-1,1)) + + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,-1)) + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z22setup(request): + bond = [(0,0), (0,1), (1,0), (1,1)] + set_options(symmetry="z22", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(0,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(1,0)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,0)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(0,1)) + + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,0)) + + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def u1setup(request): + bond = (0,1,2,3) + set_options(symmetry="u1", fermion=False) + + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega 
= ega = rand((3,6,4), [bond]*3, pattern="+--", dq=-1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=-2) + + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z4setup(request): + bond = (0,1,2,3) + set_options(symmetry="z4", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=0) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=1) + + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z2setup(request): + bond = (0,1) + set_options(symmetry="z2", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=0) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=1) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=0) + + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.mark.usefixtures('u11setup') +class TestU11: + def test_backend(self): + Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + 
assert err < 1e-10 + + def test_contract_between(self): + tn1 = self.tn.copy() + tn1.contract_between("abc", "ega") + Tegbc = tn1["abc"].transpose("e","g","b","c") + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + assert err < 1e-10 + + def test_contract_all(self): + result = self.tn.contract(all) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + deg1 = np.tensordot(self.bcd, egbc, axes=[(0,1),(2,3)]) + ref_val = np.tensordot(self.deg, deg1, axes=[(0,1,2),]*2) + err = abs(result - ref_val) + assert err < 1e-10 + + def test_contract_ind(self): + tn1 = self.tn.copy() + tn1.contract_ind("d") + out = tn1["deg"].transpose("e","g","b","c") + egbc = np.tensordot(self.deg, self.bcd, axes=[(0,),(2,)]) + err = (egbc - out.data).norm() + assert err < 1e-10 + + def test_balance_bonds(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.balance_bonds() + exact_bb = norm1.contract(all, optimize="auto-hq") + assert exact_bb == pytest.approx(exact, rel=1e-2) + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert (tsr1-tsr).data.norm() >1e-10 + + def test_equlaize_norm(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.equalize_norms() + exact_en = norm1.contract(all, optimize="auto-hq") + assert exact_en == pytest.approx(exact, rel=1e-2) + ref1 = list(norm1.tensor_map.values())[0].norm() + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert tsr1.norm() == pytest.approx(ref1, rel=1e-2) + + def test_split(self): + Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) + u, s, v = Tegbc.split(("e","b"), method="svd", absorb=None) + out = tensor_contract(u,s,v, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + + for absorb in ["left", "right"]: + for method in ["qr", "svd"]: + l, r = Tegbc.split(("g","c"), method=method, absorb=absorb) + out = tensor_contract(l, r, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + +@pytest.mark.usefixtures('u1setup') +class TestU1(TestU11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestZ4(TestU11): + pass + +@pytest.mark.usefixtures('z22setup') +class TestZ22(TestU11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestZ2(TestU11): + pass diff --git a/quimb/tensor/test/test_fermion_2d.py b/quimb/tensor/test/test_fermion_2d.py new file mode 100644 index 00000000..196af9aa --- /dev/null +++ b/quimb/tensor/test/test_fermion_2d.py @@ -0,0 +1,269 @@ +import pytest +import numpy as np +from itertools import product +from quimb.tensor.block_interface import set_options +from quimb.tensor.fermion_2d import FPEPS +from quimb.tensor.block_gen import rand_all_blocks as rand + +set_options(fermion=True) +@pytest.fixture(scope='class') +def u11setup(request): + bond = ((0,0),(1,1),(1,-1),(2,0)) + set_options(symmetry="u11") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + state_map = {0:(0,0), 1:(1,1), 2:(1,-1), 3:(2,0)} + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = state_map[np.random.randint(0,4)] + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def 
z22setup(request): + bond = ((0,0),(0,1),(1,0),(1,1)) + set_options(symmetry="z22") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + state_map = {0:(0,0), 1:(0,1), 2:(1,1), 3:(2,0)} + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = state_map[np.random.randint(0,4)] + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def u1setup(request): + bond = (0,1,2) + set_options(symmetry="u1") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = np.random.randint(0,3) + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def z4setup(request): + bond = (0,1,2,3) + set_options(symmetry="z4") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = np.random.randint(0,4) + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def z2setup(request): + bond = (0,1) + set_options(symmetry="z2") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = np.random.randint(0,2) + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.mark.usefixtures('u11setup') +class TestPEPS_U11: + @pytest.mark.parametrize('where', [ + (0, 0), (0, 1), (0, 2), (2, 0), + (1, 0), (1, 1), (1, 2), (2, 1) + ]) + @pytest.mark.parametrize('contract', [False, True]) + def test_gate_2d_single_site(self, where, contract): + G = self.G + Lx = 3 + Ly = 3 + psi = self.peps + xe = psi.compute_local_expectation({where: G}) + tn = psi.H & psi.gate(G, where, contract=contract) + assert len(tn.tensors) == 2 * Lx * Ly + int(not contract) + assert tn ^ all == pytest.approx(xe) + + @pytest.mark.parametrize( + 'contract', [False, True, 'split', 'reduce-split']) + @pytest.mark.parametrize('where', [ + [(1, 1), (2, 1)], [(2, 1), (2, 2)] + ]) + def test_gate_2d_two_site(self, where, contract): + Hij = self.Hij + psi = self.peps + xe = psi.compute_local_expectation({tuple(where): Hij}) + tn = psi.H & psi.gate(Hij, tuple(where), contract=contract) + change = {False: 1, True: -1, 'split': 0, 'reduce-split': 0}[contract] + assert len(tn.tensors) == 2 * self.Lx * self.Ly + change + assert tn ^ all == pytest.approx(xe) + + def 
test_contract_2d_one_layer_boundary(self): + psi = self.peps + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=6) + assert xt == pytest.approx(xe, rel=1e-2) + + def test_contract_2d_two_layer_boundary(self): + psi = self.peps + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=6, layer_tags=['KET', 'BRA']) + assert xt == pytest.approx(xe, rel=1e-2) + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_row_envs(self, two_layer): + psi = self.peps + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, + 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_row_environments(**compress_opts) + + for i in range(norm.Lx): + norm_i = ( + row_envs['bottom', i] & + row_envs['mid', i] & + row_envs['top', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_col_envs(self, two_layer): + psi = self.peps + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, + 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_col_environments(**compress_opts) + + for i in range(norm.Ly): + norm_i = ( + row_envs['left', i] & + row_envs['mid', i] & + row_envs['right', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + def test_normalize(self): + psi = self.peps + norm = psi.make_norm().contract(all) + assert norm != pytest.approx(1.0) + psi.normalize_(balance_bonds=True, equalize_norms=True, cutoff=2e-3) + norm = psi.make_norm().contract(all) + assert norm == pytest.approx(1.0, rel=1e-2) + + def test_compute_local_expectation_one_sites(self): + peps = self.peps + coos = list(product(range(self.Lx), range(self.Ly))) + terms = {coo: self.G for coo in coos} + + expecs = peps.compute_local_expectation( + terms, + normalized=True, + return_all=True) + + norm = peps.compute_norm() + for where, G in terms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where) + tn = ket & bra + out = tn.contract_boundary(max_bond=12) + assert out == pytest.approx(expecs[where][0], rel=1e-2) + assert norm == pytest.approx(expecs[where][1], rel=1e-2) + + def test_compute_local_expectation_two_sites(self): + normalized=True + peps = self.peps + Hij = self.Hij + hterms = {coos: Hij for coos in peps.gen_horizontal_bond_coos()} + vterms = {coos: Hij for coos in peps.gen_vertical_bond_coos()} + + opts = dict(cutoff=2e-3, max_bond=12, contract_optimize='random-greedy') + norm = peps.compute_norm(max_bond=12, cutoff=2e-3) + he = peps.compute_local_expectation( + hterms, normalized=normalized, return_all=True, **opts) + ve = peps.compute_local_expectation( + vterms, normalized=normalized, return_all=True, **opts) + + for where, G in hterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="reduce-split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(he[where][0], rel=1e-2) + assert norm == pytest.approx(he[where][1], rel=1e-2) + + for where, G in vterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = 
ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(ve[where][0], rel=1e-2) + assert norm == pytest.approx(ve[where][1], rel=1e-2) + +@pytest.mark.usefixtures('u1setup') +class TestPEPS_U1(TestPEPS_U11): + pass + +@pytest.mark.usefixtures('z22setup') +class TestPEPS_Z22(TestPEPS_U11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestPEPS_Z4(TestPEPS_U11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestPEPS_Z2(TestPEPS_U11): + pass diff --git a/quimb/tensor/test/test_fermion_numerics.py b/quimb/tensor/test/test_fermion_numerics.py new file mode 100644 index 00000000..77d94522 --- /dev/null +++ b/quimb/tensor/test/test_fermion_numerics.py @@ -0,0 +1,215 @@ +import pytest +import numpy as np +from quimb.tensor.fermion import ( + FermionTensor, FermionTensorNetwork, tensor_contract) +from quimb.tensor.block_gen import rand_all_blocks as rand +from quimb.tensor.block_interface import set_options, set_symmetry + +set_options(fermion=True) + +@pytest.fixture(scope='class') +def u11setup(request): + bond = [(0,0), (1,1), (1,-1), (2,0)] + set_options(symmetry="u11") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(1,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(-1,-1)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,-1)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(-1,1)) + + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,-1)) + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z22setup(request): + bond = [(0,0), (0,1), (1,0), (1,1)] + set_options(symmetry="z22") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(0,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(1,0)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,0)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(0,1)) + + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,0)) + + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = 
FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def u1setup(request): + bond = (0,1,2,3) + set_options(symmetry="u1") + + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=-1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=-2) + + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z4setup(request): + bond = (0,1,2,3) + set_options(symmetry="z4") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=0) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=1) + + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z2setup(request): + bond = (0,1) + set_options(symmetry="z2") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=0) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=1) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=0) + + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, 
inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + + +@pytest.mark.usefixtures('u11setup') +class TestU11: + def test_backend(self): + Tegbc = tensor_contract(self.Tega, self.Tabc, output_inds=("e","g","b", "c")) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + assert err < 1e-10 + + def test_contract_between(self): + tn1 = self.tn.copy() + tn1.contract_between("abc", "ega") + Tegbc = tn1["abc"].transpose("e","g","b","c") + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + assert err < 1e-10 + + def test_contract_all(self): + result = self.tn.contract(all) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + deg1 = np.tensordot(self.bcd, egbc, axes=[(0,1),(2,3)]) + ref_val = np.tensordot(self.deg, deg1, axes=[(0,1,2),]*2) + err = abs(result - ref_val) + assert err < 1e-10 + + def test_contract_ind(self): + tn1 = self.tn.copy() + tn1.contract_ind("d") + out = tn1["deg"].transpose("e","g","b","c") + egbc = np.tensordot(self.deg, self.bcd, axes=[(0,),(2,)]) + err = (egbc - out.data).norm() + assert err < 1e-10 + + def test_balance_bonds(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.balance_bonds() + exact_bb = norm1.contract(all, optimize="auto-hq") + assert exact_bb == pytest.approx(exact, rel=1e-2) + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert (tsr1-tsr).data.norm() > 1e-10 + + def test_equalize_norm(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.equalize_norms() + exact_en = norm1.contract(all, optimize="auto-hq") + assert exact_en == pytest.approx(exact, rel=1e-2) + ref1 = list(norm1.tensor_map.values())[0].norm() + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert tsr1.norm() == pytest.approx(ref1, rel=1e-2) + + def test_split(self): + Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) + u, s, v = Tegbc.split(("e","b"), method="svd", absorb=None, get="tensors") + out = tensor_contract(u,s,v, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + + for absorb in ["left", "right"]: + for method in ["qr", "svd"]: + l, r = Tegbc.split(("g","c"), method=method, absorb=absorb, get="tensors") + out = tensor_contract(l, r, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + +@pytest.mark.usefixtures('u1setup') +class TestU1(TestU11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestZ4(TestU11): + pass + +@pytest.mark.usefixtures('z22setup') +class TestZ22(TestU11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestZ2(TestU11): + pass diff --git a/tests/test_fermion/test_fermion_2d.py b/tests/test_fermion/test_fermion_2d.py new file mode 100644 index 00000000..2ca34e57 --- /dev/null +++ b/tests/test_fermion/test_fermion_2d.py @@ -0,0 +1,173 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.fermion_2d import FPEPS +from pyblock3.algebra.fermion import SparseFermionTensor +from pyblock3.algebra.symmetry import QPN, BondInfo + +class TestPEPSConstruct: + @pytest.mark.parametrize('where', [ + (0, 0), (0, 1), (0, 2), (2, 0), + (1, 0), (1, 1), (1, 2), (2, 1) + ]) + @pytest.mark.parametrize('contract', [False, True]) + def test_gate_2d_single_site(self, where, contract): + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) + G = SparseFermionTensor.random((bond, bond), 
pattern="+-").to_flat() + Lx = 3 + Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2, seed=42, tags='KET') + xe = psi.compute_local_expectation({where: G}) + tn = psi.H & psi.gate(G, where, contract=contract) + assert len(tn.tensors) == 2 * Lx * Ly + int(not contract) + assert tn ^ all == pytest.approx(xe) + + @pytest.mark.parametrize( + 'contract', [False, True, 'split', 'reduce-split']) + @pytest.mark.parametrize('where', [ + [(1, 1), (2, 1)], [(2, 1), (2, 2)] + ]) + def test_gate_2d_two_site(self, where, contract): + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) + G = SparseFermionTensor.random((bond, bond,bond,bond), pattern="++--").to_flat() + Lx = 3 + Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2, seed=42, tags='KET') + xe = psi.compute_local_expectation({tuple(where): G}) + tn = psi.H & psi.gate(G, tuple(where), contract=contract) + change = {False: 1, True: -1, 'split': 0, 'reduce-split': 0}[contract] + assert len(tn.tensors) == 2 * Lx * Ly + change + assert tn ^ all == pytest.approx(xe) + +class Test2DContract: + + def test_contract_2d_one_layer_boundary(self): + psi = FPEPS.rand(4, 4, 2, seed=42, tags='KET') + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=6) + assert xt == pytest.approx(xe, rel=1e-2) + + def test_contract_2d_two_layer_boundary(self): + psi = FPEPS.rand(4, 4, 2, seed=42, tags='KET') + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=6, layer_tags=['KET', 'BRA']) + assert xt == pytest.approx(xe, rel=1e-2) + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_row_envs(self, two_layer): + psi = FPEPS.rand(4, 2, 2, seed=42, tags='KET') + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, + 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_row_environments(**compress_opts) + + for i in range(norm.Lx): + norm_i = ( + row_envs['below', i] & + row_envs['mid', i] & + row_envs['above', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_col_envs(self, two_layer): + psi = FPEPS.rand(2, 4, 2, seed=42, tags='KET') + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, + 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_col_environments(**compress_opts) + + for i in range(norm.Ly): + norm_i = ( + row_envs['left', i] & + row_envs['mid', i] & + row_envs['right', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + + def test_normalize(self): + psi = FPEPS.rand(3, 3, 2, seed=42) + norm = psi.make_norm().contract(all) + assert norm != pytest.approx(1.0) + psi.normalize_(balance_bonds=True, equalize_norms=True, cutoff=2e-3) + norm = psi.make_norm().contract(all) + assert norm == pytest.approx(1.0, rel=1e-2) + + + + def test_compute_local_expectation_one_sites(self): + peps = FPEPS.rand(4, 3, 2, seed=42) + coos = list(itertools.product([0, 2, 3], [0, 1, 2])) + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) + terms = {coo: SparseFermionTensor.random((bond, bond)).to_flat() for coo in coos} + + expecs = peps.compute_local_expectation( + terms, + normalized=True, + return_all=True) + + norm = peps.compute_norm() + for where, G in terms.items(): + ket = 
peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where) + tn = ket & bra + out = tn.contract_boundary(max_bond=12) + assert out == pytest.approx(expecs[where][0], rel=1e-2) + assert norm == pytest.approx(expecs[where][1], rel=1e-2) + + def test_compute_local_expectation_two_sites(self): + normalized=True + peps = FPEPS.rand(4, 3, 2, seed=42) + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) + Hij = SparseFermionTensor.random((bond, bond, bond, bond), pattern="++--").to_flat() + hterms = {coos: Hij for coos in peps.gen_horizontal_bond_coos()} + vterms = {coos: Hij for coos in peps.gen_vertical_bond_coos()} + + opts = dict(cutoff=2e-3, max_bond=12, contract_optimize='random-greedy') + norm = peps.compute_norm(max_bond=12, cutoff=2e-3) + he = peps.compute_local_expectation( + hterms, normalized=normalized, return_all=True, **opts) + ve = peps.compute_local_expectation( + vterms, normalized=normalized, return_all=True, **opts) + + for where, G in hterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="reduce-split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(he[where][0], rel=1e-2) + assert norm == pytest.approx(he[where][1], rel=1e-2) + + for where, G in vterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(ve[where][0], rel=1e-2) + assert norm == pytest.approx(ve[where][1], rel=1e-2) diff --git a/tests/test_fermion/test_numerics.py b/tests/test_fermion/test_numerics.py new file mode 100644 index 00000000..ef82f1ef --- /dev/null +++ b/tests/test_fermion/test_numerics.py @@ -0,0 +1,94 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.fermion import ( + FermionTensor, FermionTensorNetwork, tensor_contract) + +from quimb.tensor.fermion_2d import FPEPS, gen_mf_peps + +from pyblock3.algebra.symmetry import (QPN, BondInfo) +from pyblock3.algebra.fermion import SparseFermionTensor + +np.random.seed(3) +bond_1 = BondInfo({QPN(0):3, QPN(1,1): 3, QPN(1,-1):3, QPN(2):3}) +bond_2 = BondInfo({QPN(0):5, QPN(1,1): 5, QPN(1,-1):5, QPN(2):5}) + +abc = SparseFermionTensor.random( + (bond_2, bond_1, bond_1), dq=QPN(1,1), pattern="+--").to_flat() + +bcd = SparseFermionTensor.random( + (bond_1, bond_1, bond_1), dq=QPN(1,-1), pattern="++-").to_flat() + +ega = SparseFermionTensor.random( + (bond_1, bond_1, bond_2), dq=QPN(1,1), pattern="-++").to_flat() + +deg = SparseFermionTensor.random( + (bond_1, bond_1, bond_1), dq=QPN(1,-1), pattern="-+-").to_flat() + +tsr_abc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) +tsr_ega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) +tsr_bcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) +tsr_deg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + +tn = FermionTensorNetwork((tsr_abc, tsr_ega, tsr_bcd, tsr_deg)) + +# Tensor Order: deg, bcd, ega, abc +# Tensor Order: 3, 2, 1, 0 + +class TestContract: + def test_backend(self): + tsr_egbc = tensor_contract(tsr_abc, tsr_ega, output_inds=("e","g","b", "c")) + egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) + err = (egbc - tsr_egbc.data).norm() + assert err < 1e-10 + + def test_contract_between(self): + tn1 = tn.copy() + 
tn1.contract_between("abc", "ega") + tsr_egbc = tn1["abc"].transpose("e","g","b","c") + + egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) + err = (egbc - tsr_egbc.data).norm() + assert err < 1e-10 + + def test_contract_all(self): + result = tn.contract(all) + + egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) + deg1 = np.tensordot(bcd, egbc, axes=[(0,1),(2,3)]) + ref_val = np.tensordot(deg, deg1, axes=[(0,1,2),]*2).data[0] + + err = abs(result - ref_val) + assert err < 1e-10 + + def test_contract_ind(self): + tn1 = tn.copy() + tn1.contract_ind("d") + out = tn1["deg"].transpose("e","g","b","c") + egbc = np.tensordot(deg, bcd, axes=[(0,),(2,)]) + err = (egbc - out.data).norm() + assert err < 1e-10 + + +class TestBalance: + def test_balance_bonds(self): + Lx = Ly = 4 + psi = FPEPS.rand(Lx, Ly, 2) + norm = psi.make_norm() + exact = norm.contract(all, optimize="auto-hq") + psi1 = psi.balance_bonds() + norm = psi1.make_norm() + exact_bb = norm.contract(all, optimize="auto-hq") + assert exact_bb == pytest.approx(exact, rel=1e-2) + + def test_equalize_norm(self): + Lx = Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2) + norm = psi.make_norm() + exact = norm.contract(all, optimize="auto-hq") + psi1 = psi.equalize_norms() + norm = psi1.make_norm() + exact_en = norm.contract(all, optimize="auto-hq") + assert exact_en == pytest.approx(exact, rel=1e-2) + for ix, iy in itertools.product(range(Lx), range(Ly)): + assert psi[ix,iy].norm() != pytest.approx(psi1[ix,iy].norm(), rel=1e-2) diff --git a/tests/test_fermion/test_operators.py b/tests/test_fermion/test_operators.py new file mode 100644 index 00000000..d75595c3 --- /dev/null +++ b/tests/test_fermion/test_operators.py @@ -0,0 +1,124 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.fermion_2d import gen_mf_peps, FPEPS +from pyblock3.algebra import fermion_operators as ops +from pyblock3.algebra.symmetry import QPN +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.fermion import SparseFermionTensor + +Lx = Ly = 6 +np.random.seed(3) +state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) +psi = gen_mf_peps(state_array) + +class TestOperators: + def test_hopping(self): + t = 2 + hop = ops.hopping(t) + blocks=[] + states = np.ones([1,1]) * .5 ** .5 + blocks.append(SubTensor(reduced=states, q_labels=(QPN(0),QPN(1,1)))) #0+ + blocks.append(SubTensor(reduced=states, q_labels=(QPN(1,1),QPN(0)))) #+0, eigenstate of hopping + # psi = (|0+> + |+0>)/sqrt(2), eigenstate of hopping (eigval = -t) + ket = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) + bra = ket.dagger + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(-t, rel=1e-2) + + def test_exponential_hop(self): + t = 3 + tau = 0.1 + hop = ops.hopping(t) + hop_exp = hop.to_exponential(-tau) + blocks=[] + states = np.ones([1,1]) * .5 + blocks.append(SubTensor(reduced=states, q_labels=(QPN(2), QPN(0)))) + blocks.append(SubTensor(reduced=states, q_labels=(QPN(0), QPN(2)))) + blocks.append(SubTensor(reduced=-states, q_labels=(QPN(1,1), QPN(1,-1)))) + blocks.append(SubTensor(reduced=states, q_labels=(QPN(1,-1), QPN(1,1)))) + + ket = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) + bra = ket.dagger + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(2*t, rel=1e-2) + + ket1 = np.tensordot(hop_exp, ket, axes=((2,3),(0,1))) + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0]
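+ # ket is an eigenstate of hop with eigenvalue 2*t (checked above), so the exponentiated gate hop_exp is expected to give exp(-2*t*tau)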
+ assert expec == pytest.approx(np.e**(-2*t*tau), rel=1e-2) + + def test_onsite_u(self): + U = 4. + uop = ops.onsite_u(U) + terms = {coo: uop for coo in itertools.product(range(Lx), range(Ly))} + result = psi.compute_local_expectation(terms, normalized=False, return_all=True) + for ix, iy in itertools.product(range(Lx), range(Ly)): + ref = U if state_array[ix,iy]==3 else 0. + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_sz(self): + sz = ops.measure_sz() + terms = {coo: sz for coo in itertools.product(range(Lx), range(Ly))} + result = psi.compute_local_expectation(terms, normalized=False, return_all=True) + ref_dic = {0:0., 1:0.5, 2:-.5, 3:0.} + for ix, iy in itertools.product(range(Lx), range(Ly)): + state = state_array[ix,iy] + ref = ref_dic[state] + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_n(self): + nop = ops.count_n() + terms = {coo: nop for coo in itertools.product(range(Lx), range(Ly))} + result = psi.compute_local_expectation(terms, normalized=False, return_all=True) + ref_dic = {0:0., 1:1, 2:1, 3:2} + for ix, iy in itertools.product(range(Lx), range(Ly)): + state = state_array[ix,iy] + ref = ref_dic[state] + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_hubbard(self): + Lx = Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2) + t = 2. + U = 6. + mu = 0.2 + hop = ops.hopping(t) + uop = ops.onsite_u(U) + nop = ops.count_n() + full_terms = {(ix, iy): uop + mu*nop for ix, iy in itertools.product(range(Lx), range(Ly))} + hterms = {coos: hop for coos in psi.gen_horizontal_bond_coos()} + vterms = {coos: hop for coos in psi.gen_vertical_bond_coos()} + full_terms.update(hterms) + full_terms.update(vterms) + mu_terms = {(ix, iy): nop for ix, iy in itertools.product(range(Lx), range(Ly))} + ene = psi.compute_local_expectation(full_terms, max_bond=12) + + ham = dict() + count_neighbour = lambda i,j: (i>0) + (i<Lx-1) + (j>0) + (j<Ly-1)
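+ # count_neighbour(i, j) gives the number of nearest-neighbour bonds of site (i, j) on the Lx x Ly lattice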