From 3c5be57b761e3a6354479c565093d4c500aa8b85 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 16 Dec 2020 15:57:51 -0800 Subject: [PATCH 01/61] first commit, fermion Tensor, TensorNetwork, PEPS implemented --- quimb/tensor/fermion.py | 1374 ++++++++++++++++++++++++++++ quimb/tensor/fermion_2d.py | 523 +++++++++++ quimb/tensor/test/test_2d.py | 170 ++++ quimb/tensor/test/test_contract.py | 76 ++ quimb/tensor/test/test_norm.py | 163 ++++ quimb/tensor/test/test_row_env.py | 136 +++ 6 files changed, 2442 insertions(+) create mode 100644 quimb/tensor/fermion.py create mode 100644 quimb/tensor/fermion_2d.py create mode 100644 quimb/tensor/test/test_2d.py create mode 100644 quimb/tensor/test/test_contract.py create mode 100644 quimb/tensor/test/test_norm.py create mode 100644 quimb/tensor/test/test_row_env.py diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py new file mode 100644 index 00000000..9d06a252 --- /dev/null +++ b/quimb/tensor/fermion.py @@ -0,0 +1,1374 @@ +import numpy as np +import weakref +import functools +from .tensor_core import (Tensor, TensorNetwork, + rand_uuid, tags_to_oset, + tensor_split, + _parse_split_opts, + check_opt, + _VALID_SPLIT_GET) +from .tensor_core import tensor_contract as _tensor_contract +from ..utils import oset, valmap +from .array_ops import asarray, ndim, transpose + +def _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds=None, direction='left'): + """ Perform pairwise contraction for two tensors in a specified fermion space. + If the two tensors are not adjacent, move one of the tensors in the given direction. + Note this could alter the tensors that are in between the two tensors in the fermion space + + Parameters + ---------- + fs : FermionSpace obj + the FermionSpace obj that contains the two tensors + tid_or_site1: a string or an integer + The string that specifies the id for the first tensor or the site for the first tensor + tid_or_site2: a string or an integer + The string that specifies the id for the 2nd tensor or the site for the 2nd tensor + out_inds: a list of strings + The list that specifies the output indices and its order + direction: string "left" or "right" + The direction to move tensors if the two tensors are not adjacent + + Returns + ------- + out : a FermionTensor object or a number + """ + site1 = fs[tid_or_site1][1] + site2 = fs[tid_or_site2][1] + + if not fs.is_adjacent(tid_or_site1, tid_or_site2): + fs.make_adjacent(tid_or_site1, tid_or_site2, direction) + + if direction=="left": + site1 = min(site1, site2) + else: + site1 = max(site1, site2) - 1 + + site2 = site1 + 1 + tsr1 = fs[site1][2] + tsr2 = fs[site2][2] + ainds, binds = tsr1.inds, tsr2.inds + _output_inds = [] + ax_a, ax_b = [], [] + for kia, ia in enumerate(ainds): + if ia not in binds: + _output_inds.append(ia) + else: + ax_a.append(kia) + ax_b.append(binds.index(ia)) + for kib, ib in enumerate(binds): + if ib not in ainds: + _output_inds.append(ib) + if out_inds is None: out_inds=_output_inds + if set(_output_inds) != set(out_inds): + raise TypeError("specified out_inds not allowed in tensordot, \ + make sure no summation/Hadamard product appears") + + out = np.tensordot(tsr1.data, tsr2.data, axes=[ax_a, ax_b]) + if len(out_inds)==0: + return out.data[0] + + if out_inds!=_output_inds: + transpose_order = tuple([_output_inds.index(ia) for ia in out_inds]) + out = out.transpose(transpose_order) + o_tags = oset.union(*(tsr1.tags, tsr2.tags)) + out = FermionTensor(out, inds=out_inds, tags=o_tags) + return out + +def _fetch_fermion_space(*tensors, 
inplace=True): + """ Retrieve the FermionSpace and the associated tensor_ids for the tensors. + If the given tensors all belong to the same FermionSpace object (fsobj), + the underlying fsobj will be returned. Otherwise, a new fsobj will be created, + and the tensors will be placed in the same order as the input tensor list/tuple. + + Parameters + ---------- + tensors : a tuple or list of FermionTensors + input_tensors + inplace: bool + if not true, a new FermionSpace will be created with all tensors copied. + so subsequent operations on the fsobj will not alter the input tensors. + tid_or_site2: a string or an integer + The string that specifies the id for the 2nd tensor or the site for the 2nd tensor + out_inds: a list of strings + The list that specifies the output indices and its order + direction: string "left" or "right" + The direction to move tensors if the two tensors are not adjacent + + Returns + ------- + fs : a FermionSpace object + tid_lst: a list of strings for the tensor_ids + """ + if isinstance(tensors, FermionTensor): + tensors = (tensors, ) + + if is_mergeable(*tensors): + fs = tensors[0].fermion_owner[1]() + if not inplace: + fs = fs.copy() + tid_lst = [tsr.fermion_owner[2] for tsr in tensors] + else: + fs = FermionSpace() + for tsr in tensors: + fs.add_tensor(tsr, virtual=inplace) + tid_lst = list(fs.tensor_order.keys()) + return fs, tid_lst + +def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False): + """ Perform tensor contractions for all the given tensors. + If input tensors do not belong to the same underlying fsobj, + the position of each tensor will be the same as its order in the input tensor tuple/list. + Note summation and Hadamard product not supported as it's not well defined for fermionic tensors + + Parameters + ---------- + tensors : a tuple or list of FermionTensors + input tensors + output_inds: a list of strings + direction: string "left" or "right" + The direction to move tensors if the two tensors are not adjacent + inplace: bool + whether to move/contract tensors in place. 
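        Example (an illustrative sketch mirroring the call in
        test_contract.py; the FermionTensors ``w, y, z, x`` are assumed to
        wrap pyblock3 fermionic block arrays and to either share a
        FermionSpace already or be placed into a fresh one)::

            out = tensor_contract(w, y, z, x, inplace=True, direction="right")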
+ + Returns + ------- + out : a FermionTensor object or a number + """ + path_info = _tensor_contract(*tensors, get='path-info') + fs, tid_lst = _fetch_fermion_space(*tensors, inplace=inplace) + for conc in path_info.contraction_list: + pos1, pos2 = conc[0] + tid1 = tid_lst.pop(pos1) + tid2 = tid_lst.pop(pos2) + site1 = fs[tid1][1] + site2 = fs[tid2][1] + out = fs._contract_pairs(site1, site2, direction=direction, inplace=True) + if not isinstance(out, (float, complex)): + tid_lst.append(out.fermion_owner[2]) + + if not isinstance(out, (float, complex)): + _output_inds = out.inds + if output_inds is None: output_inds = _output_inds + if set(_output_inds) != set(output_inds): + raise TypeError("specified out_inds not allow in tensordot, \ + make sure not summation/Hadamard product appears") + if output_inds!=_output_inds: + transpose_order = tuple([_output_inds.index(ia) for ia in output_inds]) + out = out.transpose(transpose_order, inplace=True) + return out + +def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=None, cutoff=1e-10, + cutoff_mode='rel', renorm=None, ltags=None, rtags=None, stags=None, bond_ind=None, right_inds=None): + check_opt('get', get, _VALID_SPLIT_GET) + + if left_inds is None: + left_inds = oset(T.inds) - oset(right_inds) + else: + left_inds = tags_to_oset(left_inds) + + if right_inds is None: + right_inds = oset(T.inds) - oset(left_inds) + + opts = _parse_split_opts( + method, cutoff, absorb, max_bond, cutoff_mode, renorm) + _left_inds = [T.inds.index(i) for i in left_inds] + _right_inds =[T.inds.index(i) for i in right_inds] + + if method == "svd": + left, s, right = T.data.tensor_svd(_left_inds, right_idx=_right_inds, **opts) + else: + raise NotImplementedError + + if get == 'arrays': + if absorb is None: + return left, s, right + return left, right + + ltags = T.tags | tags_to_oset(ltags) + rtags = T.tags | tags_to_oset(rtags) + if bond_ind is None: + if absorb is None: + bond_ind = (rand_uuid(), rand_uuid()) + else: + bond_ind = (rand_uuid(),) + + Tl = FermionTensor(data=left, inds=(*left_inds, bond_ind[0]), tags=ltags) + Tr = FermionTensor(data=right, inds=(bond_ind[-1], *right_inds), tags=rtags) + + if absorb is None: + stags = T.tags | tags_to_oset(stags) + Ts = FermionTensor(data=s, inds=bond_ind, tags=stags) + tensors = (Tl, Ts, Tr) + else: + tensors = (Tl, Tr) + + if get == 'tensors': + return tensors + + return FermionTensorNetwork(tensors, check_collisions=False, virtual=True) + +class FermionSpace: + """A labelled, ordered dictionary. The tensor labels point to the tensor + and its position inside the fermion space. 
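    The site ordering encodes the fermionic operator ordering: whenever
    adjacent tensors are swapped (see ``_move_left``/``_move_right``), the
    required global/local phase flips are applied to the underlying
    block-sparse data so that contraction results stay well defined.

    A minimal usage sketch (assuming ``ta`` and ``tb`` are FermionTensor
    objects backed by pyblock3 fermionic arrays)::

        fs = FermionSpace()
        fs.add_tensor(ta, virtual=True)   # occupies site 0
        fs.add_tensor(tb, virtual=True)   # appended at site 1
        fs.is_adjacent(0, 1)              # True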
+ + Parameters + ---------- + tensor_order : dictionary + tensor_order[tid] = (tensor, site) + """ + + def __init__(self, tensor_order=None, virtual=True): + self.tensor_order = {} + if tensor_order is not None: + if virtual: + self.tensor_order = tensor_order + else: + for tid, (tsr, site) in tensor_order.items(): + self.add_tensor(tsr, tid, site, virtual=virtual) + + @property + def sites(self): + """ return a list of all the occupied positions + """ + if len(self.tensor_order) == 0: + return [] + else: + return [val[1] for val in self.tensor_order.values()] + + def is_continuous(self): + """ Check whether the tensors are continously placed in the Fermion Space + """ + sites = self.sites + if len(sites) == 0: + return True + else: + if np.unique(sites).size != len(sites): + raise ValueError("at least one site is occupied multiple times") + return len(sites) == (max(sites)-min(sites)+1) + + def copy(self): + """ Copy the Fermion Space object. Tensor_ids and positions will be + preserved and tensors will be copied + """ + new_fs = FermionSpace(self.tensor_order, virtual=False) + return new_fs + + def to_tensor_network(self, site_lst=None): + """ Construct a inplace FermionTensorNetwork obj with tensors at given sites + """ + if site_lst is None: + tsrs = tuple([tsr for (tsr, _) in self.tensor_order.values()]) + else: + tsrs = tuple([tsr for (tsr, site) in self.tensor_order.values() if site in site_lst]) + return FermionTensorNetwork(tsrs, virtual=True) + + def add_tensor(self, tsr, tid=None, site=None, virtual=False): + """ Add a tensor to the current FermionSpace, eg + 01234 0123456 + XXXXX, (6, B) -> XXXXX-B + + Parameters + ---------- + tsr : FermionTensor obj + The desired output sequence of indices. + tid : string, optional + The desired tensor label + site: int or None, optional + The position to place the tensor. 
Tensor will be + appended if not specified + virtual: bool + whether to add the tensor inplace + + """ + if (tid is None) or (tid in self.tensor_order.keys()): + tid = rand_uuid(base="_T") + if site is None: + site = 0 if len(self.sites)==0 else max(self.sites) + 1 + if site not in self.sites: + T = tsr if virtual else tsr.copy() + self.tensor_order[tid] = (T, site) + T.set_fermion_owner(self, tid) + else: + raise ValueError("site:%s occupied, use replace/insert_tensor method"%site) + + def replace_tensor(self, site, tsr, tid=None, virtual=False): + """ Replace the tensor at a given site, eg + 0123456789 0123456789 + XXXXAXXXXX, (4, B) -> XXXXBXXXXX + """ + atid, site, atsr = self[site] + T = tsr if virtual else tsr.copy() + if tid is None or (tid in self.tensor_order.keys() and tid != atid): + tid = atid + + T.set_fermion_owner(self, tid) + atsr.remove_fermion_owner() + del atsr + self.tensor_order[tid] = (T, site) + + def insert_tensor(self, site, tsr, tid=None, virtual=False): + """ insert a tensor at a given site, all tensors afterwards + will be shifted by 1 to the right, eg, + 012345678 0123456789 + ABCDEFGHI, (4, X) -> ABCDXEFGHI + """ + if (tid is None) or (tid in self.tensor_order.keys()): + tid = rand_uuid(base="_T") + if site not in sites: + self.add_tensor(tsr, tid, site=site, virtual=virtual) + else: + T = tsr if virtual else tsr.copy() + T.set_fermion_owner(self, tid) + new_tensor_order = {tid: (T, site)} + for atid, (atsr, asite) in self.tensor_order.items(): + new_tensor_order[atid] = (atsr, asite+(asite>=site)) + + def insert(self, site, *tsr, virtual=False): + for T in tsr: + self.insert_tensor(site, T, virtual=virtual) + site += 1 + + def get_tid(self, site): + """ Return the tensor id at given site + """ + if site not in self.sites: + raise KeyError("site:%s not occupied"%site) + idx = self.sites.index(site) + return list(self.tensor_order.keys())[idx] + + def is_adjacent(self, tid_or_site1, tid_or_site2): + """ Check whether two tensors are adjacently placed in the space + """ + site1 = self[tid_or_site1][1] + site2 = self[tid_or_site2][1] + distance = abs(site1-site2) + return distance == 1 + + def __getitem__(self, tid_or_site): + if isinstance(tid_or_site, str): + if tid_or_site not in self.tensor_order.keys(): + raise KeyError("tid:%s not found"%tid_or_site) + tsr, site = self.tensor_order[tid_or_site] + return tid_or_site, site, tsr + elif isinstance(tid_or_site, int): + if tid_or_site not in self.sites: + raise KeyError("site:%s not occupied"%tid_or_site) + tid = self.get_tid(tid_or_site) + tsr = self.tensor_order[tid][0] + return tid, tid_or_site, tsr + else: + raise ValueError("not a valid key value(tid or site)") + + def __setitem__(self, site, tsr): + if site in self.sites: + self.replace_tensor(site, tsr) + else: + self.add_tensor(site, tsr) + + def _move_left(self, tid_or_site): + """ Switch position for the specified tensor with the tensor to its left + A_{n-1} A_n = \tilda{A}_{n} \tilda{A}_{n-1} + global phase factorized to \tilda{A}_{n-1}, + local phase factorized to \tilda{A}_n + """ + tid, site, tsr = self[tid_or_site] + if site != min(self.sites): + if site-1 not in self.sites: + raise ValueError("left of site %s not occupied"%site) + tid_l, site_l, tsr_l = self[site-1] + if tsr.parity * tsr_l.parity: + tsr_l.data._global_flip() + axes = [] + for ax, s in enumerate(tsr.inds): + if s in tsr_l.inds: + axes.append(ax) + tsr.data._local_flip(axes) + self.tensor_order[tid] = (tsr, site-1) + self.tensor_order[tid_l] = (tsr_l, site) + + def _move_right(self, 
tid_or_site): + """ Switch position for the specified tensor with the tensor to its left + A_n A_{n+1} = \tilda{A}_{n+1} \tilda{A}_n + global phase factorized to \tilda{A}_n, + local phase factorized to \tilda{A}_{n+1} + """ + tid, site, tsr = self[tid_or_site] + if site != max(self.sites): + if site+1 not in self.sites: + raise ValueError("right of site %s not occupied"%site) + tid_r, site_r, tsr_r = self[site+1] + if tsr.parity * tsr_r.parity: + tsr.data._global_flip() + axes = [] + for ax, s in enumerate(tsr_r.inds): + if s in tsr.inds: + axes.append(ax) + tsr_r.data._local_flip(axes) + self.tensor_order[tid] = (tsr, site+1) + self.tensor_order[tid_r] = (tsr_r, site) + + def move(self, tid_or_site, des_site): + '''Both local and global phase factorized to the tensor that's being operated on + ''' + tid, site, tsr = self[tid_or_site] + if site == des_site: return + move_left = (des_site < site) + iterator = range(des_site, site) if move_left else range(site+1, des_site+1) + shared_inds = [] + tid_lst = [self[isite][0] for isite in iterator] + parity = 0 + for itid in tid_lst: + itsr, isite = self.tensor_order[itid] + parity += itsr.parity + shared_inds += list(oset(itsr.inds) & oset(tsr.inds)) + if move_left: + self.tensor_order[itid] = (itsr, isite+1) + else: + self.tensor_order[itid] = (itsr, isite-1) + global_parity = (parity % 2) * tsr.data.parity + if global_parity != 0: tsr.data._global_flip() + axes = [tsr.inds.index(i) for i in shared_inds] + if len(axes)>0: tsr.data._local_flip(axes) + self.tensor_order[tid] = (tsr, des_site) + + def make_adjacent(self, tid_or_site1, tid_or_site2, direction='left'): + """ Move one tensor in the specified direction to make the two adjacent + """ + if not self.is_adjacent(tid_or_site1, tid_or_site2): + site1 = self[tid_or_site1][1] + site2 = self[tid_or_site2][1] + if site1 == site2: return + sitemin, sitemax = min(site1, site2), max(site1, site2) + if direction == 'left': + for isite in range(sitemax, sitemin+1, -1): + self._move_left(isite) + elif direction == 'right': + for isite in range(sitemin, sitemax-1): + self._move_right(isite) + else: + raise ValueError("direction %s not recognized"%direction) + + def _contract_pairs(self, tid_or_site1, tid_or_site2, out_inds=None, direction='left', inplace=True): + """ Contract two tensors in the FermionSpace + + Parameters + ---------- + tid_or_site1 : string or int + Tensor_id or position for the 1st tensor + tid_or_site2 : string or int + Tensor_id or position for the 2nd tensor + out_inds: list of string, optional + The order for the desired output indices + direction: string + The direction to move tensors if the two are not adjacent + inplace: bool + Whether to contract/move tensors inplace or in a copied fermionspace + + Returns + ------- + out : a FermionTensor object or a number + """ + fs = self if inplace else self.copy() + out = _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds, direction) + + if isinstance(out, (float, complex)): + return out + + site1 = fs[tid_or_site1][1] + site2 = fs[tid_or_site2][1] + + if direction=="left": + site1 = min(site1, site2) + else: + site1 = max(site1, site2) - 1 + site2 = site1 + 1 + # the output fermion tensor will replace the two input tensors in the space + fs.replace_tensor(site1, out, virtual=True) + fs.remove_tensor(site2) + + return out + + def remove_tensor(self, tid_or_site, inplace=True): + """ remove a specified tensor at a given site, eg + 012345 01234 + ABCDEF, (3, True) -> ABCEF + + 012345 012345 + ABCDEF, (3, False) -> ABC-EF + """ + 
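        # Look up the tensor, drop it from the ordering and, when ``inplace``,
        # shift every tensor to the right of ``site`` one slot left so the
        # occupied sites stay contiguous.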
tid, site, tsr = self[tid_or_site] + del self.tensor_order[tid] + if inplace: + indent_sites = [] + for isite in self.sites: + if isite > site: + indent_sites.append(isite) + indent_sites = sorted(indent_sites) + tid_lst = [self.get_tid(isite) for isite in indent_sites] + for tid in tid_lst: + tsr, site = self.tensor_order[tid] + self.tensor_order[tid] = (tsr, site-1) + + def compress_space(self): + """ if the space is not continously occupied, compress it, eg, + 012345678 01234 + -A--B-CDE -> ABCDE + """ + sites = self.sites + if min(sites) ==0 and self.is_continuous(): + return + for tid, (tsr, site) in self.tensor_order.items(): + isite = sum(sites U^{cba\star}cba + """ + axes = list(range(self.ndim))[::-1] + data = self.data.permute(axes).conj() + inds = self.inds[::-1] + tsr = self.copy() + tsr.modify(data=data, inds=inds) + + return tsr + + def ind_size(self, ind): + size = 0 + for blkshape in self.shapes: + size += blkshape[self.inds.index(ind)] + return size + + def fuse(self, fuse_map, inplace=False): + raise NotImplementedError + + def unfuse(self, unfuse_map, shape_map, inplace=False): + raise NotImplementedError + + def squeeze(self, inplace=False): + raise NotImplementedError + + def norm(self): + """Frobenius norm of this tensor. + """ + return np.linalg.norm(self.data.data) + + def symmetrize(self, ind1, ind2, inplace=False): + raise NotImplementedError + + def unitize(self, left_inds=None, inplace=False, method='qr'): + raise NotImplementedError + + def randomize(self, dtype=None, inplace=False, **randn_opts): + raise NotImplementedError + + def flip(self, ind, inplace=False): + raise NotImplementedError + + def __and__(self, other): + """Combine with another ``Tensor`` or ``TensorNetwork`` into a new + ``TensorNetwork``. + """ + return FermionTensorNetwork((self, other)) + + def __or__(self, other): + """Combine virtually (no copies made) with another ``Tensor`` or + ``TensorNetwork`` into a new ``TensorNetwork``. + """ + return FermionTensorNetwork((self, other), virtual=True) + + def graph(self, *args, **kwargs): + """Plot a graph of this tensor and its indices. 
+ """ + FermionTensorNetwork((self,)).graph(*args, **kwargs) + + +def is_mergeable(*ts_or_tsn): + """Check if all objects(FermionTensor or FermionTensorNetwork) + are part of the same FermionSpace + """ + if isinstance(ts_or_tsn, (FermionTensor, FermionTensorNetwork)): + return True + fs_lst = [] + site_lst = [] + for obj in ts_or_tsn: + if isinstance(obj, FermionTensor): + if obj.fermion_owner is None: + return False + hashval, fsobj, tid = obj.fermion_owner + fs_lst.append(hashval) + site_lst.append(fsobj()[tid][1]) + elif isinstance(obj, FermionTensorNetwork): + fs_lst.append(hash(obj.fermion_space)) + site_lst.extend(obj.filled_sites) + else: + raise TypeError("unable to find fermionspace") + + return all([fs==fs_lst[0] for fs in fs_lst]) and len(set(site_lst)) == len(site_lst) + +class FermionTensorNetwork(TensorNetwork): + + + def __init__(self, ts, *, virtual=False, check_collisions=True): + + if is_mergeable(*ts) and virtual: + self.tensor_map = dict() + self.tag_map = dict() + self.ind_map = dict() + self.fermion_space = _fetch_fermion_space(*ts)[0] + self.assemble(ts) + else: + if isinstance(ts, FermionTensorNetwork): + self.tag_map = valmap(lambda tids: tids.copy(), ts.tag_map) + self.ind_map = valmap(lambda tids: tids.copy(), ts.ind_map) + self.fermion_space = ts.fermion_space if virtual else ts.fermion_space.copy() + self.tensor_map = dict() + for tid, t in ts.tensor_map.items(): + self.tensor_map[tid] = self.fermion_space[tid][2] + self.tensor_map[tid].add_owner(self, tid) + for ep in ts.__class__._EXTRA_PROPS: + setattr(self, ep, getattr(ts, ep)) + return + + # internal structure + self.fermion_space = FermionSpace() + self.tensor_map = dict() + self.tag_map = dict() + self.ind_map = dict() + self._inner_inds = oset() + for t in ts: + self.add(t, virtual=virtual, check_collisions=check_collisions) + self._inner_inds = None + + def __and__(self, other): + """Combine this tensor network with more tensors, without contracting. + Copies the tensors. + """ + virtual = is_mergeable(self, other) + return FermionTensorNetwork((self, other), virtual=virtual) + + def __or__(self, other): + """Combine this tensor network with more tensors, without contracting. + Views the constituent tensors. 
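        No copies of the tensors are made.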
+ """ + return FermionTensorNetwork((self, other), virtual=True) + + def assemble_with_tensor(self, tsr): + if not is_mergeable(self, tsr): + raise ValueError("tensor not same in the fermion space of the tensor network") + tid = tsr.fermion_owner[2] + TensorNetwork.add_tensor(self, tsr, tid, virtual=True) + + def assemble_with_tensor_network(self, tsn): + if not is_mergeable(self, tsn): + raise ValueError("the two tensor networks not in the fermion space") + TensorNetwork.add_tensor_network(self, tsn, virtual=True) + + def assemble(self, t): + if isinstance(t, (tuple, list)): + for each_t in t: + self.assemble(each_t) + return + + istensor = isinstance(t, FermionTensor) + istensornetwork = isinstance(t, FermionTensorNetwork) + + if not (istensor or istensornetwork): + raise TypeError("TensorNetwork should be called as " + "`TensorNetwork(ts, ...)`, where each " + "object in 'ts' is a Tensor or " + "TensorNetwork.") + if istensor: + self.assemble_with_tensor(t) + else: + self.assemble_with_tensor_network(t) + + def add_tensor(self, tsr, tid=None, virtual=False, site=None): + if tid is None or tid in self.fermion_space.tensor_order.keys(): + tid = rand_uuid(base="_T") + if virtual: + fs = tsr.fermion_owner + if fs is not None: + if fs[0] != hash(self.fermion_space): + raise ValueError("the tensor is already is in a different Fermion Space, \ + inplace addition not allowed") + else: + if fs[2] in self.tensor_map.keys(): + raise ValueError("the tensor is already in this TensorNetwork, \ + inplace addition not allowed") + else: + self.assemble_with_tensor(tsr) + else: + self.fermion_space.add_tensor(tsr, tid, site, virtual=True) + TensorNetwork.add_tensor(self, tsr, tid, virtual=True) + else: + T = tsr.copy() + self.fermion_space.add_tensor(T, tid, site, virtual=True) + TensorNetwork.add_tensor(self, T, tid, virtual=True) + + def add_tensor_network(self, tn, virtual=False, check_collisions=True): + if virtual: + if hash(tn.fermion_space) == hash(self.fermion_space): + if is_mergeable(self, tn): + TensorNetwork.add_tensor_network(tn, virtual=virtual, check_collisions=check_collisions) + else: + raise ValueError("the two tensornetworks co-share same sites, inplace addition not allow") + return + + if not tn.is_continuous(): + raise ValueError("input tensor network is not contiguously ordered") + + filled_sites = tn.filled_sites + sorted_sites = sorted(filled_sites) + + if check_collisions: # add tensors individually + if getattr(self, '_inner_inds', None) is None: + self._inner_inds = oset(self.inner_inds()) + + # check for matching inner_indices -> need to re-index + other_inner_ix = oset(tn.inner_inds()) + clash_ix = self._inner_inds & other_inner_ix + + if clash_ix: + can_keep_ix = other_inner_ix - self._inner_inds + new_inds = oset(rand_uuid() for _ in range(len(clash_ix))) + reind = dict(zip(clash_ix, new_inds)) + self._inner_inds.update(new_inds, can_keep_ix) + else: + self._inner_inds.update(other_inner_ix) + + # add tensors, reindexing if necessary + for site in sorted_sites: + tid, _, tsr = tn.fermion_space[site] + if clash_ix and any(i in reind for i in tsr.inds): + tsr = tsr.reindex(reind, inplace=virtual) + self.add_tensor(tsr, tid=tid, virtual=virtual) + + else: # directly add tensor/tag indexes + for site in sorted_sites: + tid, _, tsr = tn.fermion_space[site] + self.add_tensor(tsr, tid=tid, virtual=virtual) + + def add(self, t, virtual=False, check_collisions=True): + """Add FermionTensor, FermionTensorNetwork or sequence thereof to self. 
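        Unlike the plain ``TensorNetwork.add``, added tensors are also
        registered in this network's ``fermion_space`` so that their relative
        order is tracked.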
+ """ + if isinstance(t, (tuple, list)): + for each_t in t: + self.add(each_t, virtual=virtual, + check_collisions=check_collisions) + return + + istensor = isinstance(t, FermionTensor) + istensornetwork = isinstance(t, FermionTensorNetwork) + + if not (istensor or istensornetwork): + raise TypeError("TensorNetwork should be called as " + "`TensorNetwork(ts, ...)`, where each " + "object in 'ts' is a Tensor or " + "TensorNetwork.") + + if istensor: + self.add_tensor(t, virtual=virtual) + else: + self.add_tensor_network(t, virtual=virtual, + check_collisions=check_collisions) + + def select(self, tags, which='all'): + + tagged_tids = self._get_tids_from_tags(tags, which=which) + ts = [self.tensor_map[n] for n in tagged_tids] + tn = FermionTensorNetwork(ts, check_collisions=False, virtual=True) + tn.view_like_(self) + + return tn + + def __iand__(self, tensor): + """Inplace, but non-virtual, addition of a Tensor or TensorNetwork to + this network. It should not have any conflicting indices. + """ + if is_mergeable(self, tensor): + self.assemble(tensor) + else: + self.add(tensor, virtual=False) + return self + + def __ior__(self, tensor): + """Inplace, virtual, addition of a Tensor or TensorNetwork to this + network. It should not have any conflicting indices. + """ + self.add(tensor, virtual=True) + return self + + # ------------------------------- Methods ------------------------------- # + + @property + def filled_sites(self): + return [self.fermion_space[tid][1] for tid in self.tensor_map.keys()] + + def is_complete(self): + ''' + Check if the current tensor network contains all the tensors in the fermion space + ''' + full_tid = self.fermion_space.tensor_order.keys() + tensor_tid = self.tensor_map.keys() + return set(full_tid) == set(tensor_tid) + + def is_continuous(self): + """ + Check if sites in the current tensor network are contiguously occupied + """ + filled_sites = self.filled_sites + if len(filled_sites) ==0 : return True + return (max(filled_sites) - min(filled_sites) + 1) == len(filled_sites) + + def copy(self): + """ Tensors and underlying FermionSpace(all tensors in it) will + be copied + """ + return self.__class__(self, virtual=False) + + def simple_copy(self): + newtn = FermionTensorNetwork([]) + newtn.add_tensor_network(self) + newtn.view_like_(self) + return newtn + + def _pop_tensor(self, tid, remove_from_fs=True): + """Remove a tensor from this network, returning said tensor. 
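        By default the tensor is also removed from the underlying
        FermionSpace; pass ``remove_from_fs=False`` (or use the
        ``_pop_tensor_`` partialmethod defined below) to keep its slot in the
        space.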
+ """ + # pop the tensor itself + t = self.tensor_map.pop(tid) + + # remove the tid from the tag and ind maps + self._remove_tid(t.tags, self.tag_map, tid) + self._remove_tid(t.inds, self.ind_map, tid) + + # remove this tensornetwork as an owner + t.remove_owner(self) + if remove_from_fs: + self.fermion_space.remove_tensor(tid) + t.remove_fermion_owner() + + return t + + + _pop_tensor_ = functools.partialmethod(_pop_tensor, remove_from_fs=False) + + @property + def H(self): + tn = self.copy() + fs = tn.fermion_space + max_site = max(fs.sites) + + for tid, (tsr, site) in fs.tensor_order.items(): + reverse_order = list(range(tsr.ndim))[::-1] + new_data = tsr.data.permute(reverse_order).conj() + new_inds = tsr.inds[::-1] + tsr.modify(data=new_data, inds=new_inds) + fs.tensor_order.update({tid: (tsr, max_site-site)}) + return tn + + def __mul__(self, other): + raise NotImplementedError + + def __rmul__(self, other): + raise NotImplementedError + + def __imul__(self, other): + raise NotImplementedError + + def __truediv__(self, other): + raise NotImplementedError + + def __itruediv__(self, other): + raise NotImplementedError + + # ----------------- selecting and splitting the network ----------------- # + + + def __setitem__(self, tags, tensor): + #TODO: FIXME + """Set the single tensor uniquely associated with ``tags``. + """ + tids = self._get_tids_from_tags(tags, which='all') + if len(tids) != 1: + raise KeyError("'TensorNetwork.__setitem__' is meant for a single " + "existing tensor only - found {} with tag(s) '{}'." + .format(len(tids), tags)) + + if not isinstance(tensor, Tensor): + raise TypeError("Can only set value with a new 'Tensor'.") + + tid, = tids + site = self.fermion_space.tensor_order[tid][1] + TensorNetwork._pop_tensor(tid) + TensorNetwork.add_tensor(tensor, tid=tid, virtual=True) + self.fermion_space.replace_tensor(site, tensor, tid=tid, virtual=True) + + def partition_tensors(self, tags, inplace=False, which='any'): + """Split this TN into a list of tensors containing any or all of + ``tags`` and a ``TensorNetwork`` of the the rest. + + Parameters + ---------- + tags : sequence of str + The list of tags to filter the tensors by. Use ``...`` + (``Ellipsis``) to filter all. + inplace : bool, optional + If true, remove tagged tensors from self, else create a new network + with the tensors removed. + which : {'all', 'any'} + Whether to require matching all or any of the tags. + + Returns + ------- + (u_tn, t_ts) : (TensorNetwork, tuple of Tensors) + The untagged tensor network, and the sequence of tagged Tensors. + + See Also + -------- + partition, select, select_tensors + """ + tagged_tids = self._get_tids_from_tags(tags, which=which) + + # check if all tensors have been tagged + if len(tagged_tids) == self.num_tensors: + return None, self.tensor_map.values() + + # Copy untagged to new network, and pop tagged tensors from this + untagged_tn = self if inplace else self.copy() + tagged_ts = tuple(map(untagged_tn._pop_tensor_, sorted(tagged_tids))) + + return untagged_tn, tagged_ts + + def partition(self, tags, which='any', inplace=False): + """Split this TN into two, based on which tensors have any or all of + ``tags``. Unlike ``partition_tensors``, both results are TNs which + inherit the structure of the initial TN. + + Parameters + ---------- + tags : sequence of str + The tags to split the network with. + which : {'any', 'all'} + Whether to split based on matching any or all of the tags. + inplace : bool + If True, actually remove the tagged tensors from self. 
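        Note that when ``inplace=False`` both halves are rebuilt from a single
        copy of the underlying FermionSpace, so the relative ordering of all
        tensors is preserved across the partition.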
+ + Returns + ------- + untagged_tn, tagged_tn : (TensorNetwork, TensorNetwork) + The untagged and tagged tensor networs. + + See Also + -------- + partition_tensors, select, select_tensors + """ + tagged_tids = self._get_tids_from_tags(tags, which=which) + + kws = {'check_collisions': False} + + if inplace: + t1 = self + t2s = [t1._pop_tensor_(tid) for tid in tagged_tids] + t2 = FermionTensorNetwork(t2s, **kws) + t2.view_like_(self) + + else: # rebuild both -> quicker + new_fs = self.fermion_space.copy() + t1_site = [] + t2_site = [] + for tid in self.tensor_map.keys(): + (t2_site if tid in tagged_tids else t1_site).append(self.fermion_space[tid][1]) + t1 = new_fs.to_tensor_network(t1_site) + t2 = new_fs.to_tensor_network(t2_site) + t1.view_like_(self) + t2.view_like_(self) + + return t1, t2 + + def replace_with_svd(self, where, left_inds, eps, *, which='any', + right_inds=None, method='isvd', max_bond=None, + absorb='both', cutoff_mode='rel', renorm=None, + ltags=None, rtags=None, keep_tags=True, + start=None, stop=None, inplace=False): + r"""Replace all tensors marked by ``where`` with an iteratively + constructed SVD. E.g. if ``X`` denote ``where`` tensors:: + + :__ ___: + ---X X--X X--- : \ / : + | | | | ==> : U~s~VH---: + ---X--X--X--X--- :__/ \ : + | +--- : \__: + X left_inds : + right_inds + + Parameters + ---------- + where : tag or seq of tags + Tags specifying the tensors to replace. + left_inds : ind or sequence of inds + The indices defining the left hand side of the SVD. + eps : float + The tolerance to perform the SVD with, affects the number of + singular values kept. See + :func:`quimb.linalg.rand_linalg.estimate_rank`. + which : {'any', 'all', '!any', '!all'}, optional + Whether to replace tensors matching any or all the tags ``where``, + prefix with '!' to invert the selection. + right_inds : ind or sequence of inds, optional + The indices defining the right hand side of the SVD, these can be + automatically worked out, but for hermitian decompositions the + order is important and thus can be given here explicitly. + method : str, optional + How to perform the decomposition, if not an iterative method + the subnetwork dense tensor will be formed first, see + :func:`~quimb.tensor.tensor_core.tensor_split` for options. + max_bond : int, optional + The maximum bond to keep, defaults to no maximum (-1). + ltags : sequence of str, optional + Tags to add to the left tensor. + rtags : sequence of str, optional + Tags to add to the right tensor. + keep_tags : bool, optional + Whether to propagate tags found in the subnetwork to both new + tensors or drop them, defaults to ``True``. + start : int, optional + If given, assume can use ``TNLinearOperator1D``. + stop : int, optional + If given, assume can use ``TNLinearOperator1D``. + inplace : bool, optional + Perform operation in place. + + Returns + ------- + + See Also + -------- + replace_with_identity + """ + leave, svd_section = self.partition(where, which=which, + inplace=inplace) + + tags = svd_section.tags if keep_tags else oset() + ltags = tags_to_oset(ltags) + rtags = tags_to_oset(rtags) + + if right_inds is None: + # compute + right_inds = tuple(i for i in svd_section.outer_inds() + if i not in left_inds) + + if (start is None) and (stop is None): + A = svd_section.aslinearoperator(left_inds=left_inds, + right_inds=right_inds) + else: + from .tensor_1d import TNLinearOperator1D + + # check if need to invert start stop as well + if '!' 
in which: + start, stop = stop, start + self.L + left_inds, right_inds = right_inds, left_inds + ltags, rtags = rtags, ltags + + A = TNLinearOperator1D(svd_section, start=start, stop=stop, + left_inds=left_inds, right_inds=right_inds) + + ltags = tags | ltags + rtags = tags | rtags + + TL, TR = tensor_split(A, left_inds=left_inds, right_inds=right_inds, + method=method, cutoff=eps, absorb=absorb, + max_bond=max_bond, cutoff_mode=cutoff_mode, + renorm=renorm, ltags=ltags, rtags=rtags) + + leave |= TL + leave |= TR + + return leave + + def contract_between(self, tags1, tags2, **contract_opts): + """Contract the two tensors specified by ``tags1`` and ``tags2`` + respectively. This is an inplace operation. No-op if the tensor + specified by ``tags1`` and ``tags2`` is the same tensor. + + Parameters + ---------- + tags1 : + Tags uniquely identifying the first tensor. + tags2 : str or sequence of str + Tags uniquely identifying the second tensor. + contract_opts + Supplied to :func:`~quimb.tensor.tensor_core.tensor_contract`. + """ + tid1, = self._get_tids_from_tags(tags1, which='all') + tid2, = self._get_tids_from_tags(tags2, which='all') + direction = contract_opts.pop("direction", "left") + + # allow no-op for same tensor specified twice ('already contracted') + if tid1 == tid2: + return + + self._pop_tensor_(tid1) + self._pop_tensor_(tid2) + + out = self.fermion_space._contract_pairs(tid1, tid2, direction=direction, inplace=True) + if isinstance(out, (float, complex)): + return out + else: + self |= out + + def contract_ind(self, ind, **contract_opts): + """Contract tensors connected by ``ind``. + """ + tids = self._get_tids_from_inds(ind) + if len(tids) <= 1: return + ts = [self._pop_tensor_(tid) for tid in tids] + direction = contract_opts.pop("direction", "left") + out = tensor_contract(*ts, direction=direction, inplace=True) + if isinstance(out, (float, complex)): + return out + else: + self |= out + + def replace_tensor(self, tid_or_site, tsr_or_tn, virtual=False): + tid, site, tsr = self.fermion_space[tid_or_site] + istensor = isinstance(tsr_or_tn, FermionTensor) + istensornetwork = isinstance(tsr_or_tn, FermionTensorNetwork) + + pass + + + def replace_section_with_svd(self, start, stop, eps, + **replace_with_svd_opts): + raise NotImplementedError + + def convert_to_zero(self): + raise NotImplementedError + + def compress_between(self, tags1, tags2, **compress_opts): + raise NotImplementedError + + def compress_all(self, inplace=False, **compress_opts): + raise NotImplementedError + + def new_bond(self, tags1, tags2, **opts): + raise NotImplementedError + + def cut_bond(self, bnd, left_ind, right_ind): + raise NotImplementedError + + def cut_between(self, left_tags, right_tags, left_ind, right_ind): + raise NotImplementedError + + + def cut_iter(self, *inds): + raise NotImplementedError diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py new file mode 100644 index 00000000..c03802f7 --- /dev/null +++ b/quimb/tensor/fermion_2d.py @@ -0,0 +1,523 @@ +from .fermion import FermionTensorNetwork, FermionTensor +from .tensor_2d import TensorNetwork2D, TensorNetwork2DVector, PEPS +from .tensor_core import ( + rand_uuid, + oset, + tags_to_oset +) +from collections import defaultdict +from itertools import product +import numpy as np + + +class FermionTensorNetwork2D(FermionTensorNetwork,TensorNetwork2D): + + def _compatible_2d(self, other): + """Check whether ``self`` and ``other`` are compatible 2D tensor + networks such that they can remain a 2D tensor network when combined. 
+ """ + return ( + isinstance(other, FermionTensorNetwork2D) and + all(getattr(self, e) == getattr(other, e) + for e in FermionTensorNetwork2D._EXTRA_PROPS) + ) + + def __and__(self, other): + new = super().__and__(other) + if self._compatible_2d(other): + new.view_as_(FermionTensorNetwork2D, like=self) + return new + + def __or__(self, other): + new = super().__or__(other) + if self._compatible_2d(other): + new.view_as_(FermionTensorNetwork2D, like=self) + return new + + def flatten(self, fuse_multibonds=True, inplace=False): + raise NotImplementedError + + + def _contract_boundary_from_bottom_multi( + self, + xrange, + yrange, + layer_tags, + canonize=True, + compress_sweep='left', + **compress_opts + ): + raise NotImplementedError + + def contract_boundary_from_bottom( + self, + xrange, + yrange=None, + canonize=True, + compress_sweep='left', + layer_tags=None, + inplace=False, + **compress_opts + ): + tn = self if inplace else self.copy() + Lx, Ly = self._Lx, self._Ly + if yrange is None: yrange = (0, Ly-1) + for i in range(min(xrange), max(xrange)): + for j in range(min(yrange), max(yrange) + 1): + tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i + 1, j) + if layer_tags is not None: + for p in range(len(layer_tags)-1): + tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) + tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) + tn.contract_between(tag1, tag2) + return tn + + def contract_boundary_from_top( + self, + xrange, + yrange=None, + canonize=True, + compress_sweep='left', + layer_tags=None, + inplace=False, + **compress_opts + ): + tn = self if inplace else self.copy() + Lx, Ly = self._Lx, self._Ly + if yrange is None: yrange = (0, Ly-1) + for i in range(max(xrange), min(xrange), -1): + for j in range(min(yrange), max(yrange) + 1): + tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i - 1, j) + if layer_tags is not None: + for p in range(len(layer_tags)-1): + tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) + tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) + tn.contract_between(tag1, tag2) + return tn + + def contract_boundary_from_right( + self, + yrange, + xrange=None, + canonize=True, + compress_sweep='down', + layer_tags=None, + inplace=False, + **compress_opts + ): + raise NotImplementedError + + def compute_row_environments(self, **compress_opts): + raise NotImplementedError + + def compute_row_environments(self, layer_tags=None, **compress_opts): + Lx = self._Lx + env_bottom = self.reorder_right_row(layer_tags=layer_tags) + env_top = env_bottom.copy() + row_envs = dict() + + first_row = env_bottom.row_tag(0) + row_envs["below", 0] = FermionTensorNetwork([]) + row_envs['below', 1] = env_bottom.select(first_row).simple_copy() + row_envs['mid', 0] = env_bottom.select(first_row).simple_copy() + + for i in range(2, Lx): + below_row = env_bottom.row_tag(i-1) + row_envs["mid", i-1] = env_bottom.select(below_row).simple_copy() + env_bottom.contract_boundary_from_bottom((i-2, i-1), layer_tags=layer_tags, inplace=True) + row_envs['below', i] = env_bottom.select(below_row).simple_copy() + + last_row = env_bottom.row_tag(Lx-1) + row_envs['mid', Lx-1] = env_bottom.select(last_row).simple_copy() + + row_envs['above', Lx-1] = FermionTensorNetwork([]) + row_envs['above', Lx-2] = env_top.select(last_row).simple_copy() + + for i in range(Lx-3, -1, -1): + env_top.contract_boundary_from_top((i+1, i+2), layer_tags=layer_tags, inplace=True) + row_envs['above', i] = env_top.select(last_row).simple_copy() + + + return row_envs + + 
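    # A sketch of how the reordering helpers below are meant to be used
    # (assuming ``norm`` is a FermionTensorNetwork2D, e.g. a <bra|ket> norm
    # built with layer tags ('KET', 'BRA')):
    #
    #     ordered = norm.reorder_right_row(layer_tags=('KET', 'BRA'))
    #
    # This sweeps the lattice row by row (left to right within a row, rows
    # bottom to top) and calls ``fermion_space.move`` so the tensors are laid
    # out contiguously in that order before boundary contraction.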
def _reorder_from_tid(self, tid_map, inplace=False): + tn = self if inplace else self.copy() + for tid, site in tid_map.items(): + tn.fermion_space.move(tid, site) + return tn + + def reorder(self, direction="ru", layer_tags=None, inplace=False): + Lx, Ly = self._Lx, self._Ly + row_wise = (direction[0] in ["r", "l"]) + iter_dic = {"u": range(Lx), + "d": range(Lx)[::-1], + "r": range(Ly), + "l": range(Ly)[::-1]} + if row_wise: + iterator = product(iter_dic[direction[1]], iter_dic[direction[0]]) + else: + iterator = product(iter_dic[direction[0]], iter_dic[direction[1]]) + position = 0 + tid_map = dict() + for i, j in iterator: + x, y = (i, j) if row_wise else (j, i) + site_tag = self.site_tag(x, y) + if layer_tags is None: + tid, = self._get_tids_from_tags(site_tag) + tid_map[tid] = position + position += 1 + else: + for tag in layer_tags: + tid, = self._get_tids_from_tags((site_tag, tag)) + tid_map[tid] = position + position += 1 + + return self._reorder_from_tid(tid_map, inplace) + + def reorder_upward_column(self, direction="right", layer_tags=None, inplace=False): + direction = "u" + direction[0] + return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) + + def reorder_downward_column(self, direction="right", layer_tags=None, inplace=False): + direction = "d" + direction[0] + return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) + + def reorder_right_row(self, direction="upward", layer_tags=None, inplace=False): + direction = "r" + direction[0] + return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) + + def reorder_left_row(self, direction="upward", layer_tags=None, inplace=False): + direction = "l" + direction[0] + return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) + + +class FPEPS(FermionTensorNetwork2D, + PEPS): + + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_site_ind_id', + ) + + def __init__(self, arrays, *, shape='urdlp', tags=None, + site_ind_id='k{},{}', site_tag_id='I{},{}', + row_tag_id='ROW{}', col_tag_id='COL{}', + order_iterator=None, **tn_opts): + + if isinstance(arrays, FPEPS): + super().__init__(arrays) + return + + tags = tags_to_oset(tags) + self._site_ind_id = site_ind_id + self._site_tag_id = site_tag_id + self._row_tag_id = row_tag_id + self._col_tag_id = col_tag_id + + arrays = tuple(tuple(x for x in xs) for xs in arrays) + self._Lx = len(arrays) + self._Ly = len(arrays[0]) + tensors = [] + + # cache for both creating and retrieving indices + ix = defaultdict(rand_uuid) + + if order_iterator is None: + order_iterator = product(range(self.Lx), range(self.Ly)) + for i, j in order_iterator: + array = arrays[i][j] + + # figure out if we need to transpose the arrays from some order + # other than up right down left physical + array_order = shape + if i == self.Lx - 1: + array_order = array_order.replace('u', '') + if j == self.Ly - 1: + array_order = array_order.replace('r', '') + if i == 0: + array_order = array_order.replace('d', '') + if j == 0: + array_order = array_order.replace('l', '') + + # allow convention of missing bonds to be singlet dimensions + if array.ndim != len(array_order): + raise ValueError("array shape not matching array order") + + transpose_order = tuple( + array_order.find(x) for x in 'urdlp' if x in array_order + ) + + if transpose_order != tuple(range(len(array_order))): + array = array.transpose(transpose_order) + + # get the relevant indices corresponding to neighbours + inds = [] + if 
'u' in array_order: + inds.append(ix[(i + 1, j), (i, j)]) + if 'r' in array_order: + inds.append(ix[(i, j), (i, j + 1)]) + if 'd' in array_order: + inds.append(ix[(i, j), (i - 1, j)]) + if 'l' in array_order: + inds.append(ix[(i, j - 1), (i, j)]) + inds.append(self.site_ind(i, j)) + + # mix site, row, column and global tags + + ij_tags = tags | oset((self.site_tag(i, j), + self.row_tag(i), + self.col_tag(j))) + # create the site tensor! + tensors.append(FermionTensor(data=array, inds=inds, tags=ij_tags)) + super().__init__(tensors, check_collisions=False, **tn_opts) + + @classmethod + def rand(cls, Lx, Ly, bond_dim, phys_dim=2, + dtype=float, seed=None, parity=None, + **peps_opts): + """Create a random (un-normalized) PEPS. + + Parameters + ---------- + Lx : int + The number of rows. + Ly : int + The number of columns. + bond_dim : int + The bond dimension. + physical : int, optional + The physical index dimension. + dtype : dtype, optional + The dtype to create the arrays with, default is real double. + seed : int, optional + A random seed. + parity: int or int array of (0,1), optional + parity for each site, default is random parity for all sites + peps_opts + Supplied to :class:`~quimb.tensor.tensor_2d.PEPS`. + + Returns + ------- + psi : PEPS + """ + if seed is not None: + np.random.seed(seed) + + arrays = [[None for _ in range(Ly)] for _ in range(Lx)] + + from pyblock3.algebra.fermion import SparseFermionTensor + from pyblock3.algebra.symmetry import SZ, BondInfo + + if isinstance(parity, np.ndarray): + if not parity.shape != (Lx, Ly): + raise ValueError("parity array shape not matching (Lx, Ly)") + elif isinstance(parity, int): + parity = np.ones((Lx, Ly), dtype=int) * (parity % 2) + elif parity is None: + parity = np.random.randint(0,2,Lx*Ly).reshape(Lx, Ly) + else: + raise TypeError("parity type not recoginized") + + vir_info = BondInfo({SZ(0): bond_dim, SZ(1): bond_dim}) + phy_info = BondInfo({SZ(0): phys_dim, SZ(1): phys_dim}) + + for i, j in product(range(Lx), range(Ly)): + + shape = [] + if i != Lx - 1: # bond up + shape.append(vir_info) + if j != Ly - 1: # bond right + shape.append(vir_info) + if i != 0: # bond down + shape.append(vir_info) + if j != 0: # bond left + shape.append(vir_info) + + shape.append(phy_info) + dq = SZ(parity[i][j]) + + arrays[i][j] = SparseFermionTensor.random(shape, dq=dq, dtype=dtype).to_flat() + + return cls(arrays, **peps_opts) + + +class FermionTensorNetwork2DVector(TensorNetwork2DVector, + FermionTensorNetwork2D, + FermionTensorNetwork): + + + def to_dense(self, *inds_seq, **contract_opts): + raise NotImplementedError + + + def make_norm( + self, + mangle_append='*', + layer_tags=('KET', 'BRA'), + return_all=False, + ): + """Make the norm tensor network of this 2D vector. + + Parameters + ---------- + mangle_append : {str, False or None}, optional + How to mangle the inner indices of the bra. + layer_tags : (str, str), optional + The tags to identify the top and bottom. + return_all : bool, optional + Return the norm, the ket and the bra. 
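        The bra is built as the Hermitian conjugate of a retagged copy of the
        ket, with its inner indices mangled so that only the physical indices
        are shared with the ket. For example (a sketch, assuming ``psi`` is an
        FPEPS)::

            norm, ket, bra = psi.make_norm(return_all=True)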
+ """ + ket = self.copy() + ket.add_tag(layer_tags[0]) + + bra = ket.retag({layer_tags[0]: layer_tags[1]}) + bra = bra.H + if mangle_append: + bra.mangle_inner_(mangle_append) + norm = ket & bra + + if return_all: + return norm, ket, bra + return norm + + def gate( + self, + G, + where, + contract=False, + tags=None, + inplace=False, + info=None, + **compress_opts + ): + check_opt("contract", contract, (False, True, 'split', 'reduce-split')) + + psi = self if inplace else self.copy() + + if is_lone_coo(where): + where = (where,) + else: + where = tuple(where) + ng = len(where) + + dp = psi.phys_dim(*where[0]) + tags = tags_to_oset(tags) + + # allow a matrix to be reshaped into a tensor if it factorizes + # i.e. (4, 4) assumed to be two qubit gate -> (2, 2, 2, 2) + + site_ix = [psi.site_ind(i, j) for i, j in where] + # new indices to join old physical sites to new gate + bnds = [rand_uuid() for _ in range(ng)] + reindex_map = dict(zip(site_ix, bnds)) + + TG = Tensor(G, inds=site_ix + bnds, tags=tags, left_inds=bnds) + + if contract is False: + # + # │ │ <- site_ix + # GGGGG + # │╱ │╱ <- bnds + # ──●───●── + # ╱ ╱ + # + psi.reindex_(reindex_map) + psi |= TG + return psi + + elif (contract is True) or (ng == 1): + # + # │╱ │╱ + # ──GGGGG── + # ╱ ╱ + # + psi.reindex_(reindex_map) + + # get the sites that used to have the physical indices + site_tids = psi._get_tids_from_inds(bnds, which='any') + + # pop the sites, contract, then re-add + pts = [psi._pop_tensor(tid) for tid in site_tids] + psi |= tensor_contract(*pts, TG) + + return psi + + # following are all based on splitting tensors to maintain structure + ij_a, ij_b = where + + long_range_path_sequence = None + manual_lr_path = False + string = tuple(gen_long_range_path( + *where, sequence=long_range_path_sequence)) + + # the tensors along this string, which will be updated + original_ts = [psi[coo] for coo in string] + + # the len(string) - 1 indices connecting the string + bonds_along = [next(iter(bonds(t1, t2))) + for t1, t2 in pairwise(original_ts)] + + if contract == 'split': + # + # │╱ │╱ │╱ │╱ + # ──GGGGG── ==> ──G┄┄┄G── + # ╱ ╱ ╱ ╱ + # + gate_string_split_( + TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts) + + elif contract == 'reduce-split': + # + # │ │ │ │ + # GGGGG GGG │ │ + # │╱ │╱ ==> ╱│ │ ╱ ==> ╱│ │ ╱ │╱ │╱ + # ──●───●── ──>─●─●─<── ──>─GGG─<── ==> ──G┄┄┄G── + # ╱ ╱ ╱ ╱ ╱ ╱ ╱ ╱ + # + # + gate_string_reduce_split_( + TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts) + + return psi + + + def compute_norm( + self, + layer_tags=('KET', 'BRA'), + **contract_opts, + ): + """Compute the norm of this vector via boundary contraction. 
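        Not yet supported for fermionic PEPS in this commit; the intended
        flow (building the norm with ``make_norm`` and boundary contracting
        it) is kept below for reference.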
+ """ + raise NotImplementedError + norm = self.make_norm(layer_tags=layer_tags) + return norm.contract_boundary(layer_tags=layer_tags, **contract_opts) + + def compute_local_expectation( + self, + terms, + normalized=False, + autogroup=True, + contract_optimize='auto-hq', + return_all=False, + plaquette_envs=None, + plaquette_map=None, + **plaquette_env_options, + ): + + raise NotImplementedError + + def normalize( + self, + balance_bonds=False, + equalize_norms=False, + inplace=False, + **boundary_contract_opts, + ): + raise NotImplementedError diff --git a/quimb/tensor/test/test_2d.py b/quimb/tensor/test/test_2d.py new file mode 100644 index 00000000..bc344e8c --- /dev/null +++ b/quimb/tensor/test/test_2d.py @@ -0,0 +1,170 @@ +import quimb as qu +import quimb.tensor as qtn +from quimb.tensor.tensor_2d import PEPS +import numpy as np +from quimb.tensor.fermion_2d import FPEPS +from quimb.tensor.fermion import _fetch_fermion_space +from quimb.tensor.tensor_core import oset +Lx = 2 +Ly = 3 +D = 4 +np.random.seed(3) + +def tensor_compress_bond( + T1, + T2, + reduced=True, + absorb='both', + info=None, + **compress_opts +): + fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) + + site1, site2 = fs[tid1][1], fs[tid2][1] + + if site1 < site2: + Tl, Tr = T1, T2 + tidl, tidr = tid1, tid2 + else: + Tl, Tr = T2, T1 + tidl, tidr = tid2, tid1 + + left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] + right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] + + out = fs._contract_pairs(tidl, tidr, direction="left") + l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + return l, r + +def get_err(max_bond=None): + if max_bond is None: max_bond = 2*D**2 + + + psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) + tsr1 = psi[0,0] + tsr2 = psi[1,0] + + for x in range(Lx): + psi.contract_between((0,x), (1,x)) + tsr1 = psi[0,0] + tsr2 = psi[0,1] + + + inds_contr = [i for i in tsr1.inds if i in tsr2.inds] + outinds = [i for i in tsr1.inds if i not in tsr2.inds] + idxa = [tsr1.inds.index(i) for i in inds_contr] + idxb = [tsr2.inds.index(i) for i in inds_contr] + + out = np.tensordot(tsr1.data, tsr2.data, axes=(idxa, idxb)) + + l, r = tensor_compress_bond(tsr1, tsr2, max_bond=max_bond) + + inds_contr = [i for i in l.inds if i in r.inds] + outinds = [i for i in l.inds if i not in r.inds] + idxa = [l.inds.index(i) for i in inds_contr] + idxb = [r.inds.index(i) for i in inds_contr] + fidx = [i for i in l.inds+r.inds if i not in inds_contr] + + out1 = np.tensordot(l.data, r.data, axes=(idxa, idxb)) + + + nblk = out.shapes.shape[0] + + err = [] + for i in range(nblk): + dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) + j = np.where(dlt==0)[0][0] + ist, ied = out.idxs[i], out.idxs[i+1] + jst, jed = out1.idxs[j], out1.idxs[j+1] + err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) + return max(err) + + + + +psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) + +ket = psi.copy() + +layer_tags=('KET', 'BRA') + +ket.add_tag(layer_tags[0]) + + +bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) +bra.mangle_inner_("*") + +norm = bra & ket + +def contract_all(tn): + Lx, Ly = tn._Lx, tn._Ly + for i in range(Lx): + for j in range(Ly): + x1, x2 = tn[i,j] + tn.contract_between(x1.tags, x2.tags) + for i in range(Lx): + for j in range(Ly-1): + x1 = tn[i,j] + x2 = tn[i,j+1] + tn.contract_between(x1.tags, x2.tags) + for i in range(Lx-1): + x1 = tn[i,0] + x2 = tn[i+1,0] + out = tn.contract_between(x1.tags, x2.tags) + return out + +def 
contract_left(tn): + Lx, Ly = tn._Lx, tn._Ly + for i in range(Lx): + for j in range(Ly): + x1, x2 = tn[i,j] + tn.contract_between(x1.tags, x2.tags) + for j in range(Ly): + for i in range(Lx-1): + x1 = tn[i,j] + x2 = tn[i+1,j] + tn.contract_between(x1.tags, x2.tags) + for i in range(Ly-1): + x1 = tn[0,i] + x2 = tn[0,i+1] + out = tn.contract_between(x1.tags, x2.tags) + return out + + +fs = norm.fermion_space +norm1 = norm.copy() + +size = Lx * Ly +for i in range(size): + norm1.fermion_space.move(2*size-1, 2*i+1) + +out1 = contract_all(norm1) + +tag1 = norm.site_tag(0, 0)#, self.site_tag(i + 1, j) +tag2 = norm.site_tag(0, 1) + +out2 = contract_left(norm) + +print(out1, out2) +#print(hash(x[0]), hash(x[1])) +#norm.contract_boundary() + +#x1, x2 = norm[0,0] +#norm.contract_between(x1.tags, x2.tags) + +#tid1, = norm._get_tids_from_tags(x1.tags, which='all') +#print(tid1) +#tid2, = norm._get_tids_from_tags(x2.tags, which='all') +#print(tid2) +#norm.contract_between(tagged_tids[0], tagged_tids[1]) +#x = norm[0,0] +#print(x[0].fermion_owner[2],x[1].fermion_owner[2])#, type(x[1])) + +#print(tid1) +#print(tid2) +#print(hash(bra[0,0]), hash(ket[0,0])) +exit() +for x in range(Lx-1): + for y in range(Ly-1): + out = psi.contract_between((0,x), (1,x)) + print(x, y, "done") diff --git a/quimb/tensor/test/test_contract.py b/quimb/tensor/test/test_contract.py new file mode 100644 index 00000000..924060e2 --- /dev/null +++ b/quimb/tensor/test/test_contract.py @@ -0,0 +1,76 @@ +import unittest +import numpy as np +from pyblock3.algebra.symmetry import SZ, BondInfo +from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor +from quimb.tensor import fermion +import copy +import quimb as qu + +np.random.seed(3) +x = SZ(0,0,0) +y = SZ(1,0,0) +infox = BondInfo({x:3, y: 2}) + +infoy = BondInfo({x:5, y: 5}) + + +asp = SparseFermionTensor.random((infoy,infox,infox), dq=y) +abc = FlatFermionTensor.from_sparse(asp) + +bsp = SparseFermionTensor.random((infox,infox,infox), dq=y) +bcd = FlatFermionTensor.from_sparse(bsp) + +csp = SparseFermionTensor.random((infox,infox,infoy), dq=y) +efa = FlatFermionTensor.from_sparse(csp) + +dsp = SparseFermionTensor.random((infox,infox,infox), dq=y) +def_ = FlatFermionTensor.from_sparse(dsp) + + +def finger(x): + dat = x.data.data + return (dat*np.sin(dat.size)).sum() + +bcef = np.tensordot(abc, efa, axes=[(0,),(2,)]) +efd = np.tensordot(bcef, bcd, axes=[(0,1),(0,1)]) +dat = np.tensordot(efd, def_, axes=[(0,1,2),(1,2,0)]) + +bcef2 = np.tensordot(bcd, def_, axes=[(2,),(0,)]) +dat1 = np.tensordot(bcef, bcef2, axes=[(0,1,2,3),(0,1,2,3)]) + +x = fermion.FermionTensor(abc, inds=['a','b','c'], tags=["x"]) +y = fermion.FermionTensor(efa, inds=['e','f','a'], tags=["y"]) +z = fermion.FermionTensor(bcd, inds=['b','c','d'], tags=["z"]) +w = fermion.FermionTensor(def_, inds=['d','e','f'], tags=["w"]) + +tn = fermion.FermionTensorNetwork((x, y, z, w)) + +tn1 = tn.copy() +tn2 = tn.copy() + +tn.contract_between(["x"], ["y"]) +tn.contract_between(["x", "y"], ["w"]) +out = tn.contract_between(["x", "y", "w"], ["z"]) + +print(dat.data[0], dat1.data[0], out) + +tn1.contract_between(["x"], ["z"]) +tn1.contract_between(["y"], ["w"]) +out = tn1.contract_between(["x", "z"], ["y","w"]) +print(dat.data[0], dat1.data[0], out) + + +tids = tn2._get_tids_from_inds(["b","c"]) +tn2.contract_ind(["b","c"]) +tn2.contract_ind(["a"]) +out = tn2.contract_ind(["f"]) +print(dat.data[0], dat1.data[0], out) + + +fs = fermion.FermionSpace() +fs.add_tensor(x, virtual=True) +fs.add_tensor(y, virtual=True) 
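# The four tensors are loaded into a bare FermionSpace and then contracted in
# yet another order; the scalar printed below should match the np.tensordot
# reference values ``dat`` and ``dat1`` computed at the top of this script.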
+fs.add_tensor(z, virtual=True) +fs.add_tensor(w, virtual=True) +out = fermion.tensor_contract(w, y, z, x, inplace=True, direction="right") +print(dat.data[0], dat1.data[0], out) diff --git a/quimb/tensor/test/test_norm.py b/quimb/tensor/test/test_norm.py new file mode 100644 index 00000000..cda140e9 --- /dev/null +++ b/quimb/tensor/test/test_norm.py @@ -0,0 +1,163 @@ +import quimb as qu +import quimb.tensor as qtn +from quimb.tensor.tensor_2d import PEPS +import numpy as np +from quimb.tensor.fermion_2d import FPEPS +from quimb.tensor.fermion import _fetch_fermion_space, FermionTensor, FermionTensorNetwork +from quimb.tensor.tensor_core import oset +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.symmetry import SZ +from pyblock3.algebra.fermion import SparseFermionTensor + +Lx = 2 +Ly = 3 +D = 2 +np.random.seed(3) + +def tensor_compress_bond( + T1, + T2, + reduced=True, + absorb='both', + info=None, + **compress_opts +): + fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) + + site1, site2 = fs[tid1][1], fs[tid2][1] + + if site1 < site2: + Tl, Tr = T1, T2 + tidl, tidr = tid1, tid2 + else: + Tl, Tr = T2, T1 + tidl, tidr = tid2, tid1 + + left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] + right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] + + out = fs._contract_pairs(tidl, tidr, direction="left") + l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + return l, r + +def get_err(max_bond=None): + if max_bond is None: max_bond = 2*D**2 + + + psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) + tsr1 = psi[0,0] + tsr2 = psi[1,0] + + for x in range(Lx): + psi.contract_between((0,x), (1,x)) + tsr1 = psi[0,0] + tsr2 = psi[0,1] + + + inds_contr = [i for i in tsr1.inds if i in tsr2.inds] + outinds = [i for i in tsr1.inds if i not in tsr2.inds] + idxa = [tsr1.inds.index(i) for i in inds_contr] + idxb = [tsr2.inds.index(i) for i in inds_contr] + + out = np.tensordot(tsr1.data, tsr2.data, axes=(idxa, idxb)) + + l, r = tensor_compress_bond(tsr1, tsr2, max_bond=max_bond) + + inds_contr = [i for i in l.inds if i in r.inds] + outinds = [i for i in l.inds if i not in r.inds] + idxa = [l.inds.index(i) for i in inds_contr] + idxb = [r.inds.index(i) for i in inds_contr] + fidx = [i for i in l.inds+r.inds if i not in inds_contr] + + out1 = np.tensordot(l.data, r.data, axes=(idxa, idxb)) + + + nblk = out.shapes.shape[0] + + err = [] + for i in range(nblk): + dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) + j = np.where(dlt==0)[0][0] + ist, ied = out.idxs[i], out.idxs[i+1] + jst, jed = out1.idxs[j], out1.idxs[j+1] + err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) + return max(err) + +dtype = "complex" +mat1 = np.zeros([2,2], dtype=dtype) +mat1[1,0] = mat1[0,1] = 0.5 +blk = [SubTensor(reduced=mat1, q_labels=(SZ(0), SZ(0)))] +mat1 = np.zeros([2,2],dtype=dtype) +mat1[1,0] = 2**0.5*.5j +blk += [SubTensor(reduced=mat1, q_labels=(SZ(1), SZ(1)))] + +x = FermionTensor(SparseFermionTensor(blocks=blk).to_flat(), inds=["a","b"]) + +y = x.H.data +out = np.tensordot(x.data, y, axes=((0,1),(1,0))) +print(out.data) + +L, R = x.split(left_inds=["a"], get="tensors") + +array = [[L.data,],[R.data,]] + +psi = FPEPS(array, shape="rldpu") #WARNING +ket = psi.copy() +layer_tags=('KET', 'BRA') + +ket.add_tag(layer_tags[0]) +bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) #WARNING +bra.mangle_inner_("*") + +L = ket[0,0] +R = ket[1,0] +L1 = bra[0,0] +R1 = bra[1,0] + +tn = FermionTensorNetwork((R1,L1,L,R)) +fs = 
tn.fermion_space +fs._contract_pairs(0,1) # WARNING +#fs._contract_pairs(1,2) +fs._contract_pairs(0,1) +out = fs._contract_pairs(0,1) + +norm = bra & ket +def contract_all(tn): + Lx, Ly = tn._Lx, tn._Ly + for i in range(Lx): + for j in range(Ly): + x1, x2 = tn[i,j] + tn.contract_between(x1.tags, x2.tags) + for i in range(Lx): + for j in range(Ly-1): + x1 = tn[i,j] + x2 = tn[i,j+1] + tn.contract_between(x1.tags, x2.tags) + for i in range(Lx-1): + x1 = tn[i,0] + x2 = tn[i+1,0] + out = tn.contract_between(x1.tags, x2.tags) + return out + +def contract_left(tn): + Lx, Ly = tn._Lx, tn._Ly + for i in range(Lx): + for j in range(Ly): + x1, x2 = tn[i,j] + tn.contract_between(x1.tags, x2.tags) + for j in range(Ly): + for i in range(Lx-1): + x1 = tn[i,j] + x2 = tn[i+1,j] + out = tn.contract_between(x1.tags, x2.tags) + for i in range(Ly-1): + x1 = tn[0,i] + x2 = tn[0,i+1] + out = tn.contract_between(x1.tags, x2.tags) + return out + +norm1 = norm.copy() +out = contract_all(norm) +print(out) +out1 = contract_left(norm1) +print(out1) diff --git a/quimb/tensor/test/test_row_env.py b/quimb/tensor/test/test_row_env.py new file mode 100644 index 00000000..cb45a9df --- /dev/null +++ b/quimb/tensor/test/test_row_env.py @@ -0,0 +1,136 @@ +import quimb as qu +import quimb.tensor as qtn +from quimb.tensor.tensor_2d import PEPS +import numpy as np +from quimb.tensor.fermion_2d import FPEPS +from quimb.tensor.fermion import _fetch_fermion_space, FermionTensorNetwork +from quimb.tensor.tensor_core import oset +from itertools import product +Lx = 3 +Ly = 3 +D = 2 +np.random.seed(3) + +def tensor_compress_bond( + T1, + T2, + reduced=True, + absorb='both', + info=None, + **compress_opts +): + fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) + + site1, site2 = fs[tid1][1], fs[tid2][1] + + if site1 < site2: + Tl, Tr = T1, T2 + tidl, tidr = tid1, tid2 + else: + Tl, Tr = T2, T1 + tidl, tidr = tid2, tid1 + + left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] + right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] + + out = fs._contract_pairs(tidl, tidr, direction="left") + l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + return l, r + +def get_err(max_bond=None): + if max_bond is None: max_bond = 2*D**2 + + + psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) + tsr1 = psi[0,0] + tsr2 = psi[1,0] + + for x in range(Lx): + psi.contract_between((0,x), (1,x)) + tsr1 = psi[0,0] + tsr2 = psi[0,1] + + + inds_contr = [i for i in tsr1.inds if i in tsr2.inds] + outinds = [i for i in tsr1.inds if i not in tsr2.inds] + idxa = [tsr1.inds.index(i) for i in inds_contr] + idxb = [tsr2.inds.index(i) for i in inds_contr] + + out = np.tensordot(tsr1.data, tsr2.data, axes=(idxa, idxb)) + + l, r = tensor_compress_bond(tsr1, tsr2, max_bond=max_bond) + + inds_contr = [i for i in l.inds if i in r.inds] + outinds = [i for i in l.inds if i not in r.inds] + idxa = [l.inds.index(i) for i in inds_contr] + idxb = [r.inds.index(i) for i in inds_contr] + fidx = [i for i in l.inds+r.inds if i not in inds_contr] + + out1 = np.tensordot(l.data, r.data, axes=(idxa, idxb)) + + + nblk = out.shapes.shape[0] + + err = [] + for i in range(nblk): + dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) + j = np.where(dlt==0)[0][0] + ist, ied = out.idxs[i], out.idxs[i+1] + jst, jed = out1.idxs[j], out1.idxs[j+1] + err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) + return max(err) + +def contract_all(tn): + Lx, Ly = tn._Lx, tn._Ly + nsite = Lx * Ly * 2 + fs = 
tn.fermion_space + for x in range(nsite-1): + out = fs._contract_pairs(0, 1) + return out + + + + +psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) + +ket = psi.copy() + +layer_tags=('KET', 'BRA') + +ket.add_tag(layer_tags[0]) + + +bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) +bra.mangle_inner_("*") +norm = bra & ket + +norm_ur = norm.reorder_upward_column(layer_tags=layer_tags) +out = contract_all(norm_ur) +norm_dl = norm.reorder_downward_column(direction="left", layer_tags=layer_tags) +norm_rd = norm.reorder_right_row(direction="down",layer_tags=layer_tags) +norm_lu = norm.reorder_left_row(direction="up",layer_tags=layer_tags) + + +row_envs = norm.compute_row_environments(layer_tags=layer_tags) +print("TESTING ROW ENVIRONMENTS") +for ix in range(Lx): + tmp = row_envs["below", ix].copy() + tmp.add_tensor_network(row_envs["mid", ix]) + tmp.add_tensor_network(row_envs["above", ix]) + fs = tmp.fermion_space + for i in range(len(fs.tensor_order.keys())-1): + out = fs._contract_pairs(0,1) + print("ROW%i env + mid: %.6f"%(ix, out)) + + + +out = contract_all(norm) +print(out) +out_ur = contract_all(norm_ur) +print(out_ur) +out_dl = contract_all(norm_dl) +print(out_dl) +out_rd = contract_all(norm_rd) +print(out_rd) +out_lu = contract_all(norm_lu) +print(out_lu) From 5c0c41d262b09e1eb4b10f4fa4dc1eafe1d044d0 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 16 Dec 2020 16:22:57 -0800 Subject: [PATCH 02/61] col env added & cleanup --- quimb/tensor/fermion_2d.py | 79 +++++++++++++++---- .../test/{test_row_env.py => test_env.py} | 13 ++- 2 files changed, 75 insertions(+), 17 deletions(-) rename quimb/tensor/test/{test_row_env.py => test_env.py} (89%) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index c03802f7..d084894e 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -37,18 +37,6 @@ def __or__(self, other): def flatten(self, fuse_multibonds=True, inplace=False): raise NotImplementedError - - def _contract_boundary_from_bottom_multi( - self, - xrange, - yrange, - layer_tags, - canonize=True, - compress_sweep='left', - **compress_opts - ): - raise NotImplementedError - def contract_boundary_from_bottom( self, xrange, @@ -105,10 +93,41 @@ def contract_boundary_from_right( inplace=False, **compress_opts ): - raise NotImplementedError + tn = self if inplace else self.copy() + Lx, Ly = self._Lx, self._Ly + if xrange is None: xrange = (0, Lx-1) + for j in range(max(yrange), min(yrange), -1): + for i in range(min(xrange), max(xrange) + 1): + tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i, j-1) + if layer_tags is not None: + for p in range(len(layer_tags)-1): + tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) + tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) + tn.contract_between(tag1, tag2) + return tn - def compute_row_environments(self, **compress_opts): - raise NotImplementedError + def contract_boundary_from_left( + self, + yrange, + xrange=None, + canonize=True, + compress_sweep='down', + layer_tags=None, + inplace=False, + **compress_opts + ): + tn = self if inplace else self.copy() + Lx, Ly = self._Lx, self._Ly + if xrange is None: xrange = (0, Lx-1) + for j in range(min(yrange), max(yrange)): + for i in range(min(xrange), max(xrange) + 1): + tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i, j+1) + if layer_tags is not None: + for p in range(len(layer_tags)-1): + tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) + tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) + 
tn.contract_between(tag1, tag2) + return tn def compute_row_environments(self, layer_tags=None, **compress_opts): Lx = self._Lx @@ -137,9 +156,37 @@ def compute_row_environments(self, layer_tags=None, **compress_opts): env_top.contract_boundary_from_top((i+1, i+2), layer_tags=layer_tags, inplace=True) row_envs['above', i] = env_top.select(last_row).simple_copy() - return row_envs + def compute_col_environments(self, layer_tags=None, **compress_opts): + Ly = self._Ly + env_left = self.reorder_upward_column(layer_tags=layer_tags) + env_right = env_left.copy() + col_envs = dict() + + first_col = env_left.col_tag(0) + col_envs["left", 0] = FermionTensorNetwork([]) + col_envs['left', 1] = env_left.select(first_col).simple_copy() + col_envs['mid', 0] = env_left.select(first_col).simple_copy() + + for i in range(2, Ly): + left_row = env_left.col_tag(i-1) + col_envs["mid", i-1] = env_left.select(left_row).simple_copy() + env_left.contract_boundary_from_left((i-2, i-1), layer_tags=layer_tags, inplace=True) + col_envs['left', i] = env_left.select(left_row).simple_copy() + + last_col = env_left.col_tag(Ly-1) + col_envs['mid', Ly-1] = env_left.select(last_col).simple_copy() + + col_envs['right', Ly-1] = FermionTensorNetwork([]) + col_envs['right', Ly-2] = env_right.select(last_col).simple_copy() + + for i in range(Ly-3, -1, -1): + env_right.contract_boundary_from_right((i+1, i+2), layer_tags=layer_tags, inplace=True) + col_envs['right', i] = env_right.select(last_col).simple_copy() + + return col_envs + def _reorder_from_tid(self, tid_map, inplace=False): tn = self if inplace else self.copy() for tid, site in tid_map.items(): diff --git a/quimb/tensor/test/test_row_env.py b/quimb/tensor/test/test_env.py similarity index 89% rename from quimb/tensor/test/test_row_env.py rename to quimb/tensor/test/test_env.py index cb45a9df..f9b9428b 100644 --- a/quimb/tensor/test/test_row_env.py +++ b/quimb/tensor/test/test_env.py @@ -111,6 +111,8 @@ def contract_all(tn): norm_lu = norm.reorder_left_row(direction="up",layer_tags=layer_tags) + + row_envs = norm.compute_row_environments(layer_tags=layer_tags) print("TESTING ROW ENVIRONMENTS") for ix in range(Lx): @@ -122,7 +124,16 @@ def contract_all(tn): out = fs._contract_pairs(0,1) print("ROW%i env + mid: %.6f"%(ix, out)) - +col_envs = norm.compute_col_environments(layer_tags=layer_tags) +print("TESTING COL ENVIRONMENTS") +for ix in range(Ly): + tmp = col_envs["left", ix].copy() + tmp.add_tensor_network(col_envs["mid", ix]) + tmp.add_tensor_network(col_envs["right", ix]) + fs = tmp.fermion_space + for i in range(len(fs.tensor_order.keys())-1): + out = fs._contract_pairs(0,1) + print("COL%i env + mid: %.6f"%(ix, out)) out = contract_all(norm) print(out) From 7fa7ce2e0b67009e67134dbf98a848c52e25aa1e Mon Sep 17 00:00:00 2001 From: yangcal Date: Thu, 17 Dec 2020 15:17:19 -0800 Subject: [PATCH 03/61] remove _move/left func, setup for bond compress --- quimb/tensor/fermion.py | 101 +++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 52 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 9d06a252..650a642c 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -149,13 +149,17 @@ def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False) if not isinstance(out, (float, complex)): _output_inds = out.inds - if output_inds is None: output_inds = _output_inds + if output_inds is None: + output_inds = _output_inds + else: + output_inds = tuple(output_inds) if set(_output_inds) != 
set(output_inds): raise TypeError("specified out_inds not allow in tensordot, \ make sure not summation/Hadamard product appears") if output_inds!=_output_inds: - transpose_order = tuple([_output_inds.index(ia) for ia in output_inds]) - out = out.transpose(transpose_order, inplace=True) + #transpose_order = tuple([_output_inds.index(ia) for ia in output_inds]) + #out = out.transpose(transpose_order, inplace=True) + out = out.transpose(output_inds, inplace=True) return out def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=None, cutoff=1e-10, @@ -208,6 +212,41 @@ def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=N return FermionTensorNetwork(tensors, check_collisions=False, virtual=True) +def tensor_compress_bond( + T1, + T2, + absorb='both', + inplace=True, + info=None, + **compress_opts +): + fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=inplace) + + site1, site2 = fs[tid1][1], fs[tid2][1] + + if site1 < site2: + Tl, Tr = T1, T2 + tidl, tidr = tid1, tid2 + else: + Tl, Tr = T2, T1 + tidl, tidr = tid2, tid1 + + tmp = fs.copy() + + left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] + right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] + + out = fs._contract_pairs(tidl, tidr, direction="left") + out_tid = out.fermion_owner[2] + out_site = fs[out_tid][1] + + l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + fs.replace_tensor(out_site, l, tid=tidl, virtual=True) + fs.insert_tensor(out_site+1, r, tid=tidr, virtual=True) + fs.move(tidl, min(site1, site2)) + fs.move(tidr, max(site2, site1)) + return l, r + class FermionSpace: """A labelled, ordered dictionary. The tensor labels point to the tensor and its position inside the fermion space. 
@@ -315,14 +354,16 @@ def insert_tensor(self, site, tsr, tid=None, virtual=False): """ if (tid is None) or (tid in self.tensor_order.keys()): tid = rand_uuid(base="_T") - if site not in sites: + if site not in self.sites: self.add_tensor(tsr, tid, site=site, virtual=virtual) else: T = tsr if virtual else tsr.copy() T.set_fermion_owner(self, tid) - new_tensor_order = {tid: (T, site)} for atid, (atsr, asite) in self.tensor_order.items(): - new_tensor_order[atid] = (atsr, asite+(asite>=site)) + if asite >= site: + self.tensor_order.update({atid: (atsr, asite+1)}) + self.tensor_order.update({tid: (T, site)}) + def insert(self, site, *tsr, virtual=False): for T in tsr: @@ -366,48 +407,6 @@ def __setitem__(self, site, tsr): else: self.add_tensor(site, tsr) - def _move_left(self, tid_or_site): - """ Switch position for the specified tensor with the tensor to its left - A_{n-1} A_n = \tilda{A}_{n} \tilda{A}_{n-1} - global phase factorized to \tilda{A}_{n-1}, - local phase factorized to \tilda{A}_n - """ - tid, site, tsr = self[tid_or_site] - if site != min(self.sites): - if site-1 not in self.sites: - raise ValueError("left of site %s not occupied"%site) - tid_l, site_l, tsr_l = self[site-1] - if tsr.parity * tsr_l.parity: - tsr_l.data._global_flip() - axes = [] - for ax, s in enumerate(tsr.inds): - if s in tsr_l.inds: - axes.append(ax) - tsr.data._local_flip(axes) - self.tensor_order[tid] = (tsr, site-1) - self.tensor_order[tid_l] = (tsr_l, site) - - def _move_right(self, tid_or_site): - """ Switch position for the specified tensor with the tensor to its left - A_n A_{n+1} = \tilda{A}_{n+1} \tilda{A}_n - global phase factorized to \tilda{A}_n, - local phase factorized to \tilda{A}_{n+1} - """ - tid, site, tsr = self[tid_or_site] - if site != max(self.sites): - if site+1 not in self.sites: - raise ValueError("right of site %s not occupied"%site) - tid_r, site_r, tsr_r = self[site+1] - if tsr.parity * tsr_r.parity: - tsr.data._global_flip() - axes = [] - for ax, s in enumerate(tsr_r.inds): - if s in tsr.inds: - axes.append(ax) - tsr_r.data._local_flip(axes) - self.tensor_order[tid] = (tsr, site+1) - self.tensor_order[tid_r] = (tsr_r, site) - def move(self, tid_or_site, des_site): '''Both local and global phase factorized to the tensor that's being operated on ''' @@ -441,11 +440,9 @@ def make_adjacent(self, tid_or_site1, tid_or_site2, direction='left'): if site1 == site2: return sitemin, sitemax = min(site1, site2), max(site1, site2) if direction == 'left': - for isite in range(sitemax, sitemin+1, -1): - self._move_left(isite) + self.move(sitemax, sitemin+1) elif direction == 'right': - for isite in range(sitemin, sitemax-1): - self._move_right(isite) + self.move(sitemin, sitemax-1) else: raise ValueError("direction %s not recognized"%direction) From 92ee274c385e449bb5529f166a9e88e9ff1582f6 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 22 Dec 2020 16:23:40 -0800 Subject: [PATCH 04/61] compress added --- quimb/tensor/fermion.py | 35 ++++++++++++++++++++++------------- quimb/tensor/fermion_2d.py | 19 +++++++++++++++---- 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 650a642c..82189f1a 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -157,8 +157,6 @@ def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False) raise TypeError("specified out_inds not allow in tensordot, \ make sure not summation/Hadamard product appears") if output_inds!=_output_inds: - #transpose_order = 
tuple([_output_inds.index(ia) for ia in output_inds]) - #out = out.transpose(transpose_order, inplace=True) out = out.transpose(output_inds, inplace=True) return out @@ -231,8 +229,6 @@ def tensor_compress_bond( Tl, Tr = T2, T1 tidl, tidr = tid2, tid1 - tmp = fs.copy() - left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] @@ -984,7 +980,6 @@ def select(self, tags, which='all'): ts = [self.tensor_map[n] for n in tagged_tids] tn = FermionTensorNetwork(ts, check_collisions=False, virtual=True) tn.view_like_(self) - return tn def __iand__(self, tensor): @@ -1336,13 +1331,30 @@ def contract_ind(self, ind, **contract_opts): else: self |= out - def replace_tensor(self, tid_or_site, tsr_or_tn, virtual=False): - tid, site, tsr = self.fermion_space[tid_or_site] - istensor = isinstance(tsr_or_tn, FermionTensor) - istensornetwork = isinstance(tsr_or_tn, FermionTensorNetwork) + def _compress_between_tids( + self, + tid1, + tid2, + canonize_distance=None, + canonize_opts=None, + equalize_norms=False, + **compress_opts + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + + if canonize_distance: + raise NotImplementedError - pass + l, r = tensor_compress_bond(Tl, Tr, inplace=True, **compress_opts) + new_tid1 = l.fermion_owner[2] + new_tid2 = r.fermion_owner[2] + self.tensor_map[new_tid1] = l + self.tensor_map[new_tid2] = r + + if equalize_norms: + raise NotImplementedError def replace_section_with_svd(self, start, stop, eps, **replace_with_svd_opts): @@ -1351,9 +1363,6 @@ def replace_section_with_svd(self, start, stop, eps, def convert_to_zero(self): raise NotImplementedError - def compress_between(self, tags1, tags2, **compress_opts): - raise NotImplementedError - def compress_all(self, inplace=False, **compress_opts): raise NotImplementedError diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index d084894e..d657dbe2 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -58,6 +58,8 @@ def contract_boundary_from_bottom( tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) tn.contract_between(tag1, tag2) + self.compress_row(i, sweep=compress_sweep, + yrange=yrange, **compress_opts) return tn def contract_boundary_from_top( @@ -81,6 +83,9 @@ def contract_boundary_from_top( tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) tn.contract_between(tag1, tag2) + + self.compress_row(i, sweep=compress_sweep, + yrange=yrange, **compress_opts) return tn def contract_boundary_from_right( @@ -104,6 +109,9 @@ def contract_boundary_from_right( tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) tn.contract_between(tag1, tag2) + + self.compress_column(j, sweep=compress_sweep, + xrange=xrange, **compress_opts) return tn def contract_boundary_from_left( @@ -127,6 +135,9 @@ def contract_boundary_from_left( tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) tn.contract_between(tag1, tag2) + + self.compress_column(j, sweep=compress_sweep, + xrange=xrange, **compress_opts) return tn def compute_row_environments(self, layer_tags=None, **compress_opts): @@ -143,7 +154,7 @@ def compute_row_environments(self, layer_tags=None, **compress_opts): for i in range(2, Lx): below_row = 
env_bottom.row_tag(i-1) row_envs["mid", i-1] = env_bottom.select(below_row).simple_copy() - env_bottom.contract_boundary_from_bottom((i-2, i-1), layer_tags=layer_tags, inplace=True) + env_bottom.contract_boundary_from_bottom((i-2, i-1), layer_tags=layer_tags, inplace=True, **compress_opts) row_envs['below', i] = env_bottom.select(below_row).simple_copy() last_row = env_bottom.row_tag(Lx-1) @@ -153,7 +164,7 @@ def compute_row_environments(self, layer_tags=None, **compress_opts): row_envs['above', Lx-2] = env_top.select(last_row).simple_copy() for i in range(Lx-3, -1, -1): - env_top.contract_boundary_from_top((i+1, i+2), layer_tags=layer_tags, inplace=True) + env_top.contract_boundary_from_top((i+1, i+2), layer_tags=layer_tags, inplace=True, **compress_opts) row_envs['above', i] = env_top.select(last_row).simple_copy() return row_envs @@ -172,7 +183,7 @@ def compute_col_environments(self, layer_tags=None, **compress_opts): for i in range(2, Ly): left_row = env_left.col_tag(i-1) col_envs["mid", i-1] = env_left.select(left_row).simple_copy() - env_left.contract_boundary_from_left((i-2, i-1), layer_tags=layer_tags, inplace=True) + env_left.contract_boundary_from_left((i-2, i-1), layer_tags=layer_tags, inplace=True, **compress_opts) col_envs['left', i] = env_left.select(left_row).simple_copy() last_col = env_left.col_tag(Ly-1) @@ -182,7 +193,7 @@ def compute_col_environments(self, layer_tags=None, **compress_opts): col_envs['right', Ly-2] = env_right.select(last_col).simple_copy() for i in range(Ly-3, -1, -1): - env_right.contract_boundary_from_right((i+1, i+2), layer_tags=layer_tags, inplace=True) + env_right.contract_boundary_from_right((i+1, i+2), layer_tags=layer_tags, inplace=True, **compress_opts) col_envs['right', i] = env_right.select(last_col).simple_copy() return col_envs From c5dc4678d2339126bd46d77054f796e2daf91435 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 6 Jan 2021 13:00:07 -0800 Subject: [PATCH 05/61] bugfix for ftensor shape; fermion_2d.compute_row/col_environment conform to quimb original implementation --- quimb/tensor/fermion.py | 37 +++++-- quimb/tensor/fermion_2d.py | 212 +++++++++++++------------------------ 2 files changed, 99 insertions(+), 150 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 82189f1a..e4d63550 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -237,6 +237,8 @@ def tensor_compress_bond( out_site = fs[out_tid][1] l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + l.modify(tags=Tl.tags) + r.modify(tags=Tr.tags) fs.replace_tensor(out_site, l, tid=tidl, virtual=True) fs.insert_tensor(out_site+1, r, tid=tidr, virtual=True) fs.move(tidl, min(site1, site2)) @@ -682,7 +684,7 @@ def shape(self): """Return the "inflated" shape composed of maximal size for each leg """ shapes = self.shapes - return np.amax(shapes, axis=0) + return tuple(np.amax(shapes, axis=0)) @functools.wraps(tensor_split) def split(self, *args, **kwargs): @@ -1331,6 +1333,27 @@ def contract_ind(self, ind, **contract_opts): else: self |= out + def contract_tags(self, tags, inplace=False, which='any', **opts): + + tids = self._get_tids_from_tags(tags, which='any') + if len(tids) == 0: + raise ValueError("No tags were found - nothing to contract. 
" + "(Change this to a no-op maybe?)") + elif len(tids) == 1: + return self + + untagged_tn, tagged_ts = self.partition_tensors( + tags, inplace=inplace, which=which) + + + contracted = tensor_contract(*tagged_ts, inplace=True, **opts) + + if untagged_tn is None: + return contracted + + untagged_tn.add_tensor(contracted, virtual=True) + return untagged_tn + def _compress_between_tids( self, tid1, @@ -1340,18 +1363,12 @@ def _compress_between_tids( equalize_norms=False, **compress_opts ): - Tl = self.tensor_map[tid1] - Tr = self.tensor_map[tid2] - if canonize_distance: - raise NotImplementedError + Tl = self._pop_tensor_(tid1) + Tr = self._pop_tensor_(tid2) l, r = tensor_compress_bond(Tl, Tr, inplace=True, **compress_opts) - - new_tid1 = l.fermion_owner[2] - new_tid2 = r.fermion_owner[2] - self.tensor_map[new_tid1] = l - self.tensor_map[new_tid2] = r + self |= (l, r) if equalize_norms: raise NotImplementedError diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index d657dbe2..1550dfdc 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -37,163 +37,94 @@ def __or__(self, other): def flatten(self, fuse_multibonds=True, inplace=False): raise NotImplementedError - def contract_boundary_from_bottom( - self, - xrange, - yrange=None, - canonize=True, - compress_sweep='left', - layer_tags=None, - inplace=False, - **compress_opts - ): - tn = self if inplace else self.copy() - Lx, Ly = self._Lx, self._Ly - if yrange is None: yrange = (0, Ly-1) - for i in range(min(xrange), max(xrange)): - for j in range(min(yrange), max(yrange) + 1): - tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i + 1, j) - if layer_tags is not None: - for p in range(len(layer_tags)-1): - tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) - tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) - tn.contract_between(tag1, tag2) - self.compress_row(i, sweep=compress_sweep, - yrange=yrange, **compress_opts) - return tn - - def contract_boundary_from_top( - self, - xrange, - yrange=None, - canonize=True, - compress_sweep='left', - layer_tags=None, - inplace=False, - **compress_opts - ): - tn = self if inplace else self.copy() - Lx, Ly = self._Lx, self._Ly - if yrange is None: yrange = (0, Ly-1) - for i in range(max(xrange), min(xrange), -1): - for j in range(min(yrange), max(yrange) + 1): - tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i - 1, j) - if layer_tags is not None: - for p in range(len(layer_tags)-1): - tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) - tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) - tn.contract_between(tag1, tag2) - - self.compress_row(i, sweep=compress_sweep, - yrange=yrange, **compress_opts) - return tn + def canonize_row(self, i, sweep, yrange=None, **canonize_opts): + pass - def contract_boundary_from_right( - self, - yrange, - xrange=None, - canonize=True, - compress_sweep='down', - layer_tags=None, - inplace=False, - **compress_opts - ): - tn = self if inplace else self.copy() - Lx, Ly = self._Lx, self._Ly - if xrange is None: xrange = (0, Lx-1) - for j in range(max(yrange), min(yrange), -1): - for i in range(min(xrange), max(xrange) + 1): - tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i, j-1) - if layer_tags is not None: - for p in range(len(layer_tags)-1): - tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) - tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) - tn.contract_between(tag1, tag2) - - self.compress_column(j, sweep=compress_sweep, - xrange=xrange, 
**compress_opts) - return tn + def canonize_column(self, j, sweep, xrange=None, **canonize_opts): + pass - def contract_boundary_from_left( - self, - yrange, - xrange=None, - canonize=True, - compress_sweep='down', - layer_tags=None, - inplace=False, - **compress_opts - ): - tn = self if inplace else self.copy() - Lx, Ly = self._Lx, self._Ly - if xrange is None: xrange = (0, Lx-1) - for j in range(min(yrange), max(yrange)): - for i in range(min(xrange), max(xrange) + 1): - tag1, tag2 = tn.site_tag(i, j), tn.site_tag(i, j+1) - if layer_tags is not None: - for p in range(len(layer_tags)-1): - tn.contract_between((tag1, layer_tags[p]), (tag1, layer_tags[p+1])) - tn.contract_between((tag2, layer_tags[p]), (tag2, layer_tags[p+1])) - tn.contract_between(tag1, tag2) - - self.compress_column(j, sweep=compress_sweep, - xrange=xrange, **compress_opts) - return tn - - def compute_row_environments(self, layer_tags=None, **compress_opts): - Lx = self._Lx - env_bottom = self.reorder_right_row(layer_tags=layer_tags) + def compute_row_environments(self, dense=False, **compress_opts): + layer_tags = compress_opts.get("layer_tags", None) + reorder_tags = compress_opts.pop("reorder_tags", layer_tags) + env_bottom = self.reorder_right_row(layer_tags=reorder_tags) env_top = env_bottom.copy() + row_envs = dict() - first_row = env_bottom.row_tag(0) - row_envs["below", 0] = FermionTensorNetwork([]) - row_envs['below', 1] = env_bottom.select(first_row).simple_copy() + # upwards pass + row_envs['below', 0] = FermionTensorNetwork([]) + first_row = self.row_tag(0) + if dense: + env_bottom ^= first_row row_envs['mid', 0] = env_bottom.select(first_row).simple_copy() - - for i in range(2, Lx): + row_envs['below', 1] = env_bottom.select(first_row).simple_copy() + for i in range(2, env_bottom.Lx): below_row = env_bottom.row_tag(i-1) row_envs["mid", i-1] = env_bottom.select(below_row).simple_copy() - env_bottom.contract_boundary_from_bottom((i-2, i-1), layer_tags=layer_tags, inplace=True, **compress_opts) - row_envs['below', i] = env_bottom.select(below_row).simple_copy() - - last_row = env_bottom.row_tag(Lx-1) - row_envs['mid', Lx-1] = env_bottom.select(last_row).simple_copy() - - row_envs['above', Lx-1] = FermionTensorNetwork([]) - row_envs['above', Lx-2] = env_top.select(last_row).simple_copy() - - for i in range(Lx-3, -1, -1): - env_top.contract_boundary_from_top((i+1, i+2), layer_tags=layer_tags, inplace=True, **compress_opts) + if dense: + env_bottom ^= (self.row_tag(i - 2), self.row_tag(i - 1)) + else: + env_bottom.contract_boundary_from_bottom_( + (i - 2, i - 1), **compress_opts) + row_envs['below', i] = env_bottom.select(first_row).simple_copy() + + last_row = env_bottom.row_tag(self.Lx-1) + row_envs['mid', self.Lx-1] = env_bottom.select(last_row).simple_copy() + # downwards pass + row_envs['above', self.Lx - 1] = FermionTensorNetwork([]) + last_row = self.row_tag(self.Lx - 1) + if dense: + env_top ^= last_row + row_envs['above', self.Lx - 2] = env_top.select(last_row).simple_copy() + for i in range(env_top.Lx - 3, -1, -1): + if dense: + env_top ^= (self.row_tag(i + 1), self.row_tag(i + 2)) + else: + env_top.contract_boundary_from_top_( + (i + 1, i + 2), **compress_opts) row_envs['above', i] = env_top.select(last_row).simple_copy() return row_envs - def compute_col_environments(self, layer_tags=None, **compress_opts): - Ly = self._Ly - env_left = self.reorder_upward_column(layer_tags=layer_tags) + def compute_col_environments(self, dense=False, **compress_opts): + layer_tags = compress_opts.get("layer_tags", None) 
+ reorder_tags = compress_opts.pop("reorder_tags", layer_tags) + env_left = self.reorder_upward_column(layer_tags=reorder_tags) env_right = env_left.copy() col_envs = dict() - first_col = env_left.col_tag(0) - col_envs["left", 0] = FermionTensorNetwork([]) - col_envs['left', 1] = env_left.select(first_col).simple_copy() + # upwards pass + col_envs['left', 0] = FermionTensorNetwork([]) + first_col = self.col_tag(0) + if dense: + env_left ^= first_col col_envs['mid', 0] = env_left.select(first_col).simple_copy() + col_envs['left', 1] = env_left.select(first_col).simple_copy() - for i in range(2, Ly): - left_row = env_left.col_tag(i-1) - col_envs["mid", i-1] = env_left.select(left_row).simple_copy() - env_left.contract_boundary_from_left((i-2, i-1), layer_tags=layer_tags, inplace=True, **compress_opts) - col_envs['left', i] = env_left.select(left_row).simple_copy() - - last_col = env_left.col_tag(Ly-1) - col_envs['mid', Ly-1] = env_left.select(last_col).simple_copy() - - col_envs['right', Ly-1] = FermionTensorNetwork([]) - col_envs['right', Ly-2] = env_right.select(last_col).simple_copy() - - for i in range(Ly-3, -1, -1): - env_right.contract_boundary_from_right((i+1, i+2), layer_tags=layer_tags, inplace=True, **compress_opts) + for i in range(2, env_left.Ly): + left_col = env_left.col_tag(i-1) + col_envs["mid", i-1] = env_left.select(left_col).simple_copy() + if dense: + env_left ^= (self.col_tag(i - 2), self.col_tag(i - 1)) + else: + env_left.contract_boundary_from_left_( + (i - 2, i - 1), **compress_opts) + col_envs['left', i] = env_left.select(first_col).simple_copy() + + last_col = env_left.col_tag(self.Ly-1) + col_envs['mid', self.Ly-1] = env_left.select(last_col).simple_copy() + # downwards pass + col_envs['right', self.Ly - 1] = FermionTensorNetwork([]) + last_col = self.col_tag(self.Ly - 1) + if dense: + env_right ^= last_col + col_envs['right', self.Ly - 2] = env_right.select(last_col).simple_copy() + for i in range(env_right.Ly - 3, -1, -1): + if dense: + env_right ^= (self.col_tag(i + 1), self.col_tag(i + 2)) + else: + env_right.contract_boundary_from_right_( + (i + 1, i + 2), **compress_opts) col_envs['right', i] = env_right.select(last_col).simple_copy() return col_envs @@ -401,6 +332,7 @@ def rand(cls, Lx, Ly, bond_dim, phys_dim=2, arrays[i][j] = SparseFermionTensor.random(shape, dq=dq, dtype=dtype).to_flat() + return cls(arrays, **peps_opts) From d0caf9f3b91ed66d2f48cce3bf049d9742eadf7d Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 6 Jan 2021 15:27:53 -0800 Subject: [PATCH 06/61] bugfix in reorder iterator --- quimb/tensor/fermion_2d.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 1550dfdc..5374b395 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -143,23 +143,26 @@ def reorder(self, direction="ru", layer_tags=None, inplace=False): "r": range(Ly), "l": range(Ly)[::-1]} if row_wise: - iterator = product(iter_dic[direction[1]], iter_dic[direction[0]]) - else: iterator = product(iter_dic[direction[0]], iter_dic[direction[1]]) + else: + iterator = product(iter_dic[direction[1]], iter_dic[direction[0]]) position = 0 tid_map = dict() for i, j in iterator: x, y = (i, j) if row_wise else (j, i) site_tag = self.site_tag(x, y) - if layer_tags is None: - tid, = self._get_tids_from_tags(site_tag) - tid_map[tid] = position - position += 1 + tid = self._get_tids_from_tags(site_tag) + if layer_tags is None or len(tid)==1: + tid, = tid + if tid not in tid_map: 
+ tid_map[tid] = position + position += 1 else: for tag in layer_tags: tid, = self._get_tids_from_tags((site_tag, tag)) - tid_map[tid] = position - position += 1 + if tid not in tid_map: + tid_map[tid] = position + position += 1 return self._reorder_from_tid(tid_map, inplace) From bb8008e2a231ed2ada16306c1bdb00fa496fa504 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 8 Jan 2021 12:44:57 -0800 Subject: [PATCH 07/61] 1. bugfix 2. reorganize fermion funcs(compress, contract), canonize added 3. plaquette environment implemented --- quimb/tensor/fermion.py | 165 ++++++++++++++++++++++---------- quimb/tensor/fermion_2d.py | 191 +++++++++++++++++++++++++++++++++---- 2 files changed, 288 insertions(+), 68 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index e4d63550..c03a19f5 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -11,6 +11,36 @@ from ..utils import oset, valmap from .array_ops import asarray, ndim, transpose + +def _contract_connected(tsr1, tsr2, out_inds=None): + ainds, binds = tsr1.inds, tsr2.inds + _output_inds = [] + ax_a, ax_b = [], [] + for kia, ia in enumerate(ainds): + if ia not in binds: + _output_inds.append(ia) + else: + ax_a.append(kia) + ax_b.append(binds.index(ia)) + for kib, ib in enumerate(binds): + if ib not in ainds: + _output_inds.append(ib) + if out_inds is None: out_inds=_output_inds + if set(_output_inds) != set(out_inds): + raise TypeError("specified out_inds not allowed in tensordot, \ + make sure no summation/Hadamard product appears") + + out = np.tensordot(tsr1.data, tsr2.data, axes=[ax_a, ax_b]) + if len(out_inds)==0: + return out.data[0] + + if out_inds!=_output_inds: + transpose_order = tuple([_output_inds.index(ia) for ia in out_inds]) + out = out.transpose(transpose_order) + o_tags = oset.union(*(tsr1.tags, tsr2.tags)) + out = FermionTensor(out, inds=out_inds, tags=o_tags) + return out + def _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds=None, direction='left'): """ Perform pairwise contraction for two tensors in a specified fermion space. If the two tensors are not adjacent, move one of the tensors in the given direction. @@ -47,33 +77,7 @@ def _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds=None, direction='le site2 = site1 + 1 tsr1 = fs[site1][2] tsr2 = fs[site2][2] - ainds, binds = tsr1.inds, tsr2.inds - _output_inds = [] - ax_a, ax_b = [], [] - for kia, ia in enumerate(ainds): - if ia not in binds: - _output_inds.append(ia) - else: - ax_a.append(kia) - ax_b.append(binds.index(ia)) - for kib, ib in enumerate(binds): - if ib not in ainds: - _output_inds.append(ib) - if out_inds is None: out_inds=_output_inds - if set(_output_inds) != set(out_inds): - raise TypeError("specified out_inds not allowed in tensordot, \ - make sure no summation/Hadamard product appears") - - out = np.tensordot(tsr1.data, tsr2.data, axes=[ax_a, ax_b]) - if len(out_inds)==0: - return out.data[0] - - if out_inds!=_output_inds: - transpose_order = tuple([_output_inds.index(ia) for ia in out_inds]) - out = out.transpose(transpose_order) - o_tags = oset.union(*(tsr1.tags, tsr2.tags)) - out = FermionTensor(out, inds=out_inds, tags=o_tags) - return out + return _contract_connected(tsr1, tsr2, out_inds) def _fetch_fermion_space(*tensors, inplace=True): """ Retrieve the FermionSpace and the associated tensor_ids for the tensors. 
@@ -210,6 +214,13 @@ def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=N return FermionTensorNetwork(tensors, check_collisions=False, virtual=True) +def _compress_connected(Tl, Tr, absorb='both', **compress_opts): + left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] + right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] + out = _contract_connected(Tl, Tr) + l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + return l, r + def tensor_compress_bond( T1, T2, @@ -219,31 +230,49 @@ def tensor_compress_bond( **compress_opts ): fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=inplace) - site1, site2 = fs[tid1][1], fs[tid2][1] - - if site1 < site2: - Tl, Tr = T1, T2 - tidl, tidr = tid1, tid2 + fs.make_adjacent(tid1, tid2) + l, r = _compress_connected(T1, T2, absorb, **compress_opts) + T1.modify(data=l.data, inds=l.inds) + T2.modify(data=r.data, inds=r.inds) + fs.move(tid1, site1) + fs.move(tid2, site2) + return T1, T2 + +def _canonize_connected(T1, T2, absorb='right', **split_opts): + if absorb == 'both': + return _compress_connected(T1, T2, absorb=absorb, **split_opts) + if absorb == "left": + T1, T2 = T2, T1 + + shared_ix, left_env_ix = T1.filter_bonds(T2) + if not shared_ix: + raise ValueError("The tensors specified don't share an bond.") + + new_T1, tRfact = T1.split(left_env_ix, get='tensors', **split_opts) + new_T2 = _contract_connected(tRfact, T2) + if absorb == "left": + return new_T2, new_T1 else: - Tl, Tr = T2, T1 - tidl, tidr = tid2, tid1 + return new_T1, new_T2 - left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] - right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] +def tensor_canonize_bond(T1, T2, absorb='right', **split_opts): - out = fs._contract_pairs(tidl, tidr, direction="left") - out_tid = out.fermion_owner[2] - out_site = fs[out_tid][1] + check_opt('absorb', absorb, ('left', 'both', 'right')) - l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) - l.modify(tags=Tl.tags) - r.modify(tags=Tr.tags) - fs.replace_tensor(out_site, l, tid=tidl, virtual=True) - fs.insert_tensor(out_site+1, r, tid=tidr, virtual=True) - fs.move(tidl, min(site1, site2)) - fs.move(tidr, max(site2, site1)) - return l, r + if absorb == 'both': + return tensor_compress_bond(T1, T2, absorb=absorb, **split_opts) + + fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) + site1, site2 = fs[tid1][1], fs[tid2][1] + + fs.make_adjacent(tid1, tid2) + l, r = _canonize_connected(T1, T2, absorb, **split_opts) + T1.modify(data=l.data, inds=l.inds) + T2.modify(data=r.data, inds=r.inds) + fs.move(tid1, site1) + fs.move(tid2, site2) + return T1, T2 class FermionSpace: """A labelled, ordered dictionary. 
The tensor labels point to the tensor @@ -429,6 +458,22 @@ def move(self, tid_or_site, des_site): if len(axes)>0: tsr.data._local_flip(axes) self.tensor_order[tid] = (tsr, des_site) + def move_past(self, tsr, site_range): + start, end = site_range + iterator = range(start, end) + shared_inds = [] + tid_lst = [self[isite][0] for isite in iterator] + parity = 0 + for itid in tid_lst: + itsr, isite = self.tensor_order[itid] + parity += itsr.parity + shared_inds += list(oset(itsr.inds) & oset(tsr.inds)) + global_parity = (parity % 2) * tsr.data.parity + if global_parity != 0: tsr.data._global_flip() + axes = [tsr.inds.index(i) for i in shared_inds] + if len(axes)>0: tsr.data._local_flip(axes) + return tsr + def make_adjacent(self, tid_or_site1, tid_or_site2, direction='left'): """ Move one tensor in the specified direction to make the two adjacent """ @@ -856,6 +901,12 @@ def __or__(self, other): """ return FermionTensorNetwork((self, other), virtual=True) + def _reorder_from_tid(self, tid_map, inplace=False): + tn = self if inplace else self.copy() + for tid, site in tid_map.items(): + tn.fermion_space.move(tid, site) + return tn + def assemble_with_tensor(self, tsr): if not is_mergeable(self, tsr): raise ValueError("tensor not same in the fermion space of the tensor network") @@ -1364,11 +1415,23 @@ def _compress_between_tids( **compress_opts ): - Tl = self._pop_tensor_(tid1) - Tr = self._pop_tensor_(tid2) + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + tensor_compress_bond(Tl, Tr, inplace=True, **compress_opts) + + if equalize_norms: + raise NotImplementedError - l, r = tensor_compress_bond(Tl, Tr, inplace=True, **compress_opts) - self |= (l, r) + def _canonize_between_tids( + self, + tid1, + tid2, + equalize_norms=False, + **canonize_opts, + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + tensor_canonize_bond(Tl, Tr, **canonize_opts) if equalize_norms: raise NotImplementedError diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 5374b395..33dac2b9 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -37,12 +37,6 @@ def __or__(self, other): def flatten(self, fuse_multibonds=True, inplace=False): raise NotImplementedError - def canonize_row(self, i, sweep, yrange=None, **canonize_opts): - pass - - def canonize_column(self, j, sweep, xrange=None, **canonize_opts): - pass - def compute_row_environments(self, dense=False, **compress_opts): layer_tags = compress_opts.get("layer_tags", None) reorder_tags = compress_opts.pop("reorder_tags", layer_tags) @@ -129,11 +123,169 @@ def compute_col_environments(self, dense=False, **compress_opts): return col_envs - def _reorder_from_tid(self, tid_map, inplace=False): - tn = self if inplace else self.copy() - for tid, site in tid_map.items(): - tn.fermion_space.move(tid, site) - return tn + def _compute_plaquette_environments_row_first( + self, + x_bsz, + y_bsz, + second_dense=None, + row_envs=None, + **compute_environment_opts + ): + if second_dense is None: + second_dense = x_bsz < 2 + + # first we contract from either side to produce column environments + if row_envs is None: + row_envs = self.compute_row_environments( + **compute_environment_opts) + + # next we form vertical strips and contract from both top and bottom + # for each column + col_envs = dict() + for i in range(self.Lx - x_bsz + 1): + # + # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● + # ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ + # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┬ + # | | | | | | | | | | | | | | | | | | | | ┊ x_bsz 
+ # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┴ + # ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ + # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● + # + row_i = FermionTensorNetwork(( + row_envs['below', i], + *[row_envs['mid', i+x] for x in range(x_bsz)], + row_envs['above', i + x_bsz - 1], + ), check_collisions=False).view_as_(FermionTensorNetwork2D, like=self) + # + # y_bsz + # <--> second_dense=True + # ●── ──● + # │ │ ╭── ──╮ + # ●── . . ──● │╭─ . . ─╮│ ┬ + # │ │ or ● ● ┊ x_bsz + # ●── . . ──● │╰─ . . ─╯│ ┴ + # │ │ ╰── ──╯ + # ●── ──● + # 'left' 'right' 'left' 'right' + # + col_envs[i] = row_i.compute_col_environments( + xrange=(max(i - 1, 0), min(i + x_bsz, self.Lx - 1)), + dense=second_dense, **compute_environment_opts) + + plaquette_envs = dict() + for i0, j0 in product(range(self.Lx - x_bsz + 1), + range(self.Ly - y_bsz + 1)): + + # we want to select bordering tensors from: + # + # L──A──A──R <- A from the row environments + # │ │ │ │ + # i0+1 L──●──●──R + # │ │ │ │ <- L, R from the column environments + # i0 L──●──●──R + # │ │ │ │ + # L──B──B──R <- B from the row environments + # + # j0 j0+1 + # + env_ij = FermionTensorNetwork(( + col_envs[i0]['left', j0], + *[col_envs[i0]['mid', ix] for ix in range(j0, j0+y_bsz)], + col_envs[i0]['right', j0 + y_bsz - 1] + ), check_collisions=False) + + ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) + tid_lst = [] + for ij in ij_tags: + tid_lst += list(env_ij._get_tids_from_tags(ij)) + position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) + reorder_map = {i:j for i, j in zip(tid_lst, position)} + env_ij._reorder_from_tid(reorder_map, inplace=True) + plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij + + return plaquette_envs + + def _compute_plaquette_environments_col_first( + self, + x_bsz, + y_bsz, + second_dense=None, + col_envs=None, + **compute_environment_opts + ): + if second_dense is None: + second_dense = y_bsz < 2 + + # first we contract from either side to produce column environments + if col_envs is None: + col_envs = self.compute_col_environments( + **compute_environment_opts) + + # next we form vertical strips and contract from both top and bottom + # for each column + row_envs = dict() + for j in range(self.Ly - y_bsz + 1): + # + # y_bsz + # <--> + # + # ╭─╱o─╱o─╮ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o╱─o╱──● + # ┃╭─|o─|o─╮┃ + # ●──o╱─o╱──● + # + col_j = FermionTensorNetwork(( + col_envs['left', j], + *[col_envs['mid', j+y] for y in range(y_bsz)], + col_envs['right', j + y_bsz - 1], + ), check_collisions=False).view_as_(FermionTensorNetwork2D, like=self) + # + # y_bsz + # <--> second_dense=True + # ●──●──●──● ╭──●──╮ + # │ │ │ │ or │ ╱ ╲ │ 'above' + # . . . . ┬ + # ┊ x_bsz + # . . . . 
┴ + # │ │ │ │ or │ ╲ ╱ │ 'below' + # ●──●──●──● ╰──●──╯ + # + row_envs[j] = col_j.compute_row_environments( + yrange=(max(j - 1, 0), min(j + y_bsz, self.Ly - 1)), + dense=second_dense, **compute_environment_opts) + + # then range through all the possible plaquettes, selecting the correct + # boundary tensors from either the column or row environments + plaquette_envs = dict() + for i0, j0 in product(range(self.Lx - x_bsz + 1), + range(self.Ly - y_bsz + 1)): + + + env_ij = FermionTensorNetwork(( + row_envs[j0]['below', i0], + *[row_envs[j0]['mid', ix] for ix in range(i0, i0+x_bsz)], + row_envs[j0]['above', i0 + x_bsz - 1] + ), check_collisions=False) + + ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) + tid_lst = [] + for ij in ij_tags: + tid_lst += list(env_ij._get_tids_from_tags(ij)) + position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) + reorder_map = {i:j for i, j in zip(tid_lst, position)} + env_ij._reorder_from_tid(reorder_map, inplace=True) + + plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij + + return plaquette_envs + def reorder(self, direction="ru", layer_tags=None, inplace=False): Lx, Ly = self._Lx, self._Ly @@ -142,23 +294,28 @@ def reorder(self, direction="ru", layer_tags=None, inplace=False): "d": range(Lx)[::-1], "r": range(Ly), "l": range(Ly)[::-1]} - if row_wise: - iterator = product(iter_dic[direction[0]], iter_dic[direction[1]]) - else: - iterator = product(iter_dic[direction[1]], iter_dic[direction[0]]) + iterator = product(iter_dic[direction[1]], iter_dic[direction[0]]) position = 0 tid_map = dict() for i, j in iterator: x, y = (i, j) if row_wise else (j, i) site_tag = self.site_tag(x, y) tid = self._get_tids_from_tags(site_tag) - if layer_tags is None or len(tid)==1: + if len(tid)==1: tid, = tid if tid not in tid_map: tid_map[tid] = position position += 1 else: - for tag in layer_tags: + if layer_tags is None: + _tags = [self.tensor_map[ix].tags for ix in tid] + _tmp_tags = _tags[0].copy() + for itag in _tags[1:]: + _tmp_tags &= itag + _layer_tags = sorted([list(i-_tmp_tags)[0] for i in _tags]) + else: + _layer_tags = layer_tags + for tag in _layer_tags: tid, = self._get_tids_from_tags((site_tag, tag)) if tid not in tid_map: tid_map[tid] = position From 37af5f4de7c5e0043befeee235b311efbdf340f3 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 8 Jan 2021 15:50:41 -0800 Subject: [PATCH 08/61] ind_size reimplementation --- quimb/tensor/fermion.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index c03a19f5..ce81d1d2 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -687,6 +687,18 @@ def fermion_owner(self): def parity(self): return self.data.parity + def ind_size(self, dim_or_ind): + if isinstance(dim_or_ind, str): + if dim_or_ind not in self.inds: + raise ValueError("%s indice not found in the tensor"%dim_or_ind) + dim_or_ind = self.inds.index(dim_or_ind) + + from pyblock3.algebra.symmetry import SZ, BondInfo + sz = [SZ.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] + sp = self.data.shapes[:,dim_or_ind] + bond_dict = dict(zip(sz, sp)) + return BondInfo(bond_dict) + def copy(self, deep=False): """Copy this tensor. Note by default (``deep=False``), the underlying array will *not* be copied. 
The fermion owner will to reset to None @@ -781,12 +793,6 @@ def H(self): return tsr - def ind_size(self, ind): - size = 0 - for blkshape in self.shapes: - size += blkshape[self.inds.index(ind)] - return size - def fuse(self, fuse_map, inplace=False): raise NotImplementedError From 28953945de679db8577c369131eced74a980a1c6 Mon Sep 17 00:00:00 2001 From: yangcal Date: Mon, 11 Jan 2021 16:59:06 -0800 Subject: [PATCH 09/61] bugfix in tensor split, multiply_index_diagonal addded --- quimb/tensor/fermion.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index ce81d1d2..005f3192 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -3,7 +3,6 @@ import functools from .tensor_core import (Tensor, TensorNetwork, rand_uuid, tags_to_oset, - tensor_split, _parse_split_opts, check_opt, _VALID_SPLIT_GET) @@ -195,9 +194,11 @@ def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=N rtags = T.tags | tags_to_oset(rtags) if bond_ind is None: if absorb is None: - bond_ind = (rand_uuid(), rand_uuid()) + bond_ind = (rand_uuid(),) * 2 else: bond_ind = (rand_uuid(),) + elif isinstance(bond_ind, str): + bond_ind = (bond_ind,) * 2 Tl = FermionTensor(data=left, inds=(*left_inds, bond_ind[0]), tags=ltags) Tr = FermionTensor(data=right, inds=(bond_ind[-1], *right_inds), tags=rtags) @@ -538,6 +539,7 @@ def remove_tensor(self, tid_or_site, inplace=True): ABCDEF, (3, False) -> ABC-EF """ tid, site, tsr = self[tid_or_site] + tsr.remove_fermion_owner() del self.tensor_order[tid] if inplace: indent_sites = [] @@ -710,6 +712,35 @@ def copy(self, deep=False): t = self.__class__(self, None) return t + def multiply_index_diagonal(self, ind, x, inplace=False, location="front"): + """Multiply this tensor by 1D array ``x`` as if it were a diagonal + tensor being contracted into index ``ind``. + """ + if location not in ["front", "back"]: + raise ValueError("invalid for the location of the diagonal") + t = self if inplace else self.copy() + ax = t.inds.index(ind) + if isinstance(x, FermionTensor): + x = x.data + if location=="front": + out = np.tensordot(x, t.data, axes=((1,), (ax,))) + transpose_order = list(range(1, ax+1)) + [0] + list(range(ax+1, t.ndim)) + else: + out = np.tensordot(t.data, x, axes=((ax,),(0,))) + transpose_order = list(range(ax)) + [t.ndim-1] + list(range(ax, t.ndim-1)) + data = np.transpose(out, transpose_order) + t.modify(data=data) + return t + + multiply_index_diagonal_ = functools.partialmethod( + multiply_index_diagonal, inplace=True) + + def get_fermion_info(self): + if self.fermion_owner is None: + return None + fs, tid = self.fermion_owner[1:] + return (tid, fs().tensor_order[tid][1]) + def contract(self, *others, output_inds=None, **opts): return tensor_contract(self, *others, output_inds=output_inds, **opts) From d1b4c7816baa208a66c60fda47094196dd59d4c9 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 12 Jan 2021 13:06:53 -0800 Subject: [PATCH 10/61] bugfix for connect_funcs when 2nd tensor is before 1st one in f.s. 
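
[Editor's note, not part of the original patch] The fix below makes the pairwise
compress/canonize helpers respect each operand's position in the FermionSpace:
the tensor that sits earlier in the space must act as the left tensor of the
contraction/split, and absorb='left'/'right' is swapped when the pair is passed
in reverse order. A minimal sketch of that ordering convention, assuming two
FermionTensors T1 and T2 that already belong to the same FermionSpace and share
a bond; ordered_pair is a hypothetical helper for illustration only (it is not
added by this patch), while get_fermion_info() is the method introduced in the
previous commit and returns a (tid, site) pair.

    def ordered_pair(T1, T2):
        # the tensor with the smaller site index in the FermionSpace is the
        # "left" operand; contraction and splitting must follow that order
        if T1.get_fermion_info()[1] <= T2.get_fermion_info()[1]:
            return T1, T2
        return T2, T1

    # usage sketch: pick the operand order before calling the split helpers
    Tl, Tr = ordered_pair(T1, T2)
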
--- quimb/tensor/fermion.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 005f3192..bb9711a7 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -218,8 +218,16 @@ def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=N def _compress_connected(Tl, Tr, absorb='both', **compress_opts): left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] - out = _contract_connected(Tl, Tr) - l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + if Tl.get_fermion_info()[1] < Tr.get_fermion_info()[1]: + out = _contract_connected(Tl, Tr) + l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + else: + out = _contract_connected(Tr, Tl) + if absorb == "left": + absorb = "right" + elif absorb == "right": + absorb = "left" + r, l = out.split(left_inds=right_inds, right_inds=left_inds, absorb=absorb, get="tensors", **compress_opts) return l, r def tensor_compress_bond( @@ -250,15 +258,19 @@ def _canonize_connected(T1, T2, absorb='right', **split_opts): if not shared_ix: raise ValueError("The tensors specified don't share an bond.") - new_T1, tRfact = T1.split(left_env_ix, get='tensors', **split_opts) - new_T2 = _contract_connected(tRfact, T2) + if T1.get_fermion_info()[1] < T2.get_fermion_info()[1]: + new_T1, tRfact = T1.split(left_env_ix, get='tensors', **split_opts) + new_T2 = _contract_connected(tRfact, T2) + else: + tRfact, new_T1 = T1.split(shared_ix, get='tensors', **split_opts) + new_T2 = _contract_connected(T2, tRfact) + if absorb == "left": return new_T2, new_T1 else: return new_T1, new_T2 def tensor_canonize_bond(T1, T2, absorb='right', **split_opts): - check_opt('absorb', absorb, ('left', 'both', 'right')) if absorb == 'both': From f65596dcc3bfa1ec6986ed08dc680de1a8759928 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 13 Jan 2021 12:55:15 -0800 Subject: [PATCH 11/61] bugfix in reorder; fix connected_contract --- quimb/tensor/fermion.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index bb9711a7..856d30ca 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -28,8 +28,17 @@ def _contract_connected(tsr1, tsr2, out_inds=None): if set(_output_inds) != set(out_inds): raise TypeError("specified out_inds not allowed in tensordot, \ make sure no summation/Hadamard product appears") - - out = np.tensordot(tsr1.data, tsr2.data, axes=[ax_a, ax_b]) + info1 = tsr1.get_fermion_info() + info2 = tsr2.get_fermion_info() + reverse_contract = False + + if info1 is not None and info2 is not None: + if info1[1] > info2[1]: + reverse_contract=True + if reverse_contract: + out = np.tensordot(tsr2.data, tsr1.data, axes=[ax_b, ax_a]) + else: + out = np.tensordot(tsr1.data, tsr2.data, axes=[ax_a, ax_b]) if len(out_inds)==0: return out.data[0] @@ -418,6 +427,14 @@ def get_tid(self, site): idx = self.sites.index(site) return list(self.tensor_order.keys())[idx] + def _reorder_from_dict(self, tid_map): + tid_lst = list(tid_map.keys()) + des_sites = list(tid_map.values()) + work_des_sites = sorted(des_sites)[::-1] + for isite in work_des_sites: + ind = des_sites.index(isite) + self.move(tid_lst[ind], isite) + def is_adjacent(self, tid_or_site1, tid_or_site2): """ Check whether two tensors are adjacently 
placed in the space """ @@ -823,6 +840,8 @@ def transpose(self, *output_inds, inplace=False): t.modify(apply=lambda x: np.transpose(x, out_shape), inds=output_inds) return t + transpose_ = functools.partialmethod(transpose, inplace=True) + @property def H(self): """Return the ket of this tensor, this is different from Fermionic transposition @@ -952,8 +971,7 @@ def __or__(self, other): def _reorder_from_tid(self, tid_map, inplace=False): tn = self if inplace else self.copy() - for tid, site in tid_map.items(): - tn.fermion_space.move(tid, site) + tn.fermion_space._reorder_from_dict(tid_map) return tn def assemble_with_tensor(self, tsr): From 56ab63e50b4d33486013cc7cee9aa1ffb9c43ed9 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 13 Jan 2021 15:50:47 -0800 Subject: [PATCH 12/61] bugfix for tensor_contract when output_inds given --- quimb/tensor/fermion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 856d30ca..fc09f138 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -169,7 +169,7 @@ def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False) raise TypeError("specified out_inds not allow in tensordot, \ make sure not summation/Hadamard product appears") if output_inds!=_output_inds: - out = out.transpose(output_inds, inplace=True) + out = out.transpose(*output_inds, inplace=True) return out def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=None, cutoff=1e-10, From 6b615c9203953a3c6c22063f40efa2e5f4664020 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 13 Jan 2021 16:19:57 -0800 Subject: [PATCH 13/61] fix bug in fs.replace_tensor --- quimb/tensor/fermion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index fc09f138..ed5d8981 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -392,7 +392,7 @@ def replace_tensor(self, site, tsr, tid=None, virtual=False): T.set_fermion_owner(self, tid) atsr.remove_fermion_owner() - del atsr + del self.tensor_order[atid] self.tensor_order[tid] = (T, site) def insert_tensor(self, site, tsr, tid=None, virtual=False): From 4909895cf83543dd9637e29346ea6a7876a23620 Mon Sep 17 00:00:00 2001 From: yangcal Date: Thu, 14 Jan 2021 15:05:47 -0800 Subject: [PATCH 14/61] add fermion_2d.gate --- quimb/tensor/fermion_2d.py | 316 ++++++++++++++++++++++++++++-- quimb/tensor/test/test_gate_2d.py | 64 ++++++ 2 files changed, 365 insertions(+), 15 deletions(-) create mode 100644 quimb/tensor/test/test_gate_2d.py diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 33dac2b9..02f11217 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -1,14 +1,267 @@ -from .fermion import FermionTensorNetwork, FermionTensor -from .tensor_2d import TensorNetwork2D, TensorNetwork2DVector, PEPS +from .fermion import ( + FermionTensorNetwork, + FermionTensor, + tensor_contract +) +from .tensor_2d import ( + TensorNetwork2D, + TensorNetwork2DVector, + PEPS, + is_lone_coo, + gen_long_range_path) from .tensor_core import ( rand_uuid, oset, - tags_to_oset + tags_to_oset, + bonds ) +from ..utils import check_opt, pairwise from collections import defaultdict from itertools import product import numpy as np +import functools +from pyblock3.algebra.fermion import FlatFermionTensor + +INVERSE_CUTOFF = 1e-10 + +def gate_string_split_(TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts): + # 
by default this means singuvalues are kept in the string 'blob' tensor + compress_opts.setdefault('absorb', 'right') + loc_info = dict([t.get_fermion_info() for t in original_ts]) + # the outer, neighboring indices of each tensor in the string + neighb_inds = [] + + # tensors we are going to contract in the blob, reindex some to attach gate + contract_ts = [] + fermion_info = [] + + for t, coo in zip(original_ts, string): + neighb_inds.append(tuple(ix for ix in t.inds if ix not in bonds_along)) + contract_ts.append(t.reindex_(reindex_map) if coo in where else t) + fermion_info.append(t.get_fermion_info()) + + blob = tensor_contract(*contract_ts, TG, inplace=True) + regauged = [] + work_site = blob.get_fermion_info()[1] + fs = blob.fermion_owner[1]() + + # one by one extract the site tensors again from each end + inner_ts = [None] * len(string) + i = 0 + j = len(string) - 1 + + while True: + lix = neighb_inds[i] + if i > 0: + lix += (bonds_along[i - 1],) + + # the original bond we are restoring + bix = bonds_along[i] + + # split the blob! + inner_ts[i], *maybe_svals, blob = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. for ``SimpleUpdate` + + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + #coo_pair = tuple(sorted((string[i], string[i + 1]))) + coo_pair = (string[i], string[i+1]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to unguage later + if i != j - 1: + blob.multiply_index_diagonal_(bix, s, location="front") + regauged.append((i + 1, bix, "front", s)) + + # move inwards along string, terminate if two ends meet + i += 1 + if i == j: + inner_ts[i] = blob + break + + # extract at end of string + lix = neighb_inds[j] + if j < len(string) - 1: + lix += (bonds_along[j],) + + # the original bond we are restoring + bix = bonds_along[j - 1] + + # split the blob! + lix = tuple(oset(blob.inds)-oset(lix)) + blob, *maybe_svals, inner_ts[j] = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. 
for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[j-1], string[j]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to ungauge later + if j != i + 1: + blob.multiply_index_diagonal_(bix, s, location="back") + regauged.append((j - 1, bix, "back", s)) + + # move inwards along string, terminate if two ends meet + j -= 1 + if j == i: + inner_ts[j] = blob + break + # SVD funcs needs to be modify and make sure S has even parity + for i, bix, location, s in regauged: + idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] + snew = np.zeros_like(s.data) + snew[idx] = 1/s.data[idx] + snew = FlatFermionTensor(s.q_labels, s.shapes, snew, idxs=s.idxs) + t = inner_ts[i] + t.multiply_index_diagonal_(bix, snew, location=location) + + for to, tn in zip(original_ts, inner_ts): + x1 = tn.inds + tn.transpose_like_(to) + to.modify(data=tn.data) + + for i, (tid, _) in enumerate(fermion_info): + if i==0: + fs.replace_tensor(work_site, original_ts[i], tid=tid, virtual=True) + else: + fs.insert_tensor(work_site+i, original_ts[i], tid=tid, virtual=True) + + fs._reorder_from_dict(dict(fermion_info)) + +def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, + reindex_map, site_ix, info, **compress_opts): + compress_opts.setdefault('absorb', 'right') + + # indices to reduce, first and final include physical indices for gate + inds_to_reduce = [(bonds_along[0], site_ix[0])] + for b1, b2 in pairwise(bonds_along): + inds_to_reduce.append((b1, b2)) + inds_to_reduce.append((bonds_along[-1], site_ix[-1])) + + # tensors that remain on the string sites and those pulled into string + outer_ts, inner_ts = [], [] + fermion_info = [] + fs = TG.fermion_owner[1]() + tid_lst = [] + for coo, rix, t in zip(string, inds_to_reduce, original_ts): + tq, tr = t.split(left_inds=None, right_inds=rix, + method='svd', get='tensors', absorb="right") + fermion_info.append(t.get_fermion_info()) + outer_ts.append(tq) + inner_ts.append(tr.reindex_(reindex_map) if coo in where else tr) + + for tq, tr, t in zip(outer_ts, inner_ts, original_ts): + isite = t.get_fermion_info()[1] + fs.replace_tensor(isite, tq, virtual=True) + fs.insert_tensor(isite+1, tr, virtual=True) + + blob = tensor_contract(*inner_ts, TG, inplace=True) + work_site = blob.get_fermion_info()[1] + regauged = [] + + # extract the new reduced tensors sequentially from each end + i = 0 + j = len(string) - 1 + + while True: + + # extract at beginning of string + lix = bonds(blob, outer_ts[i]) + if i == 0: + lix.add(site_ix[0]) + else: + lix.add(bonds_along[i - 1]) + + # the original bond we are restoring + bix = bonds_along[i] + + # split the blob! + inner_ts[i], *maybe_svals, blob = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. 
for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[i], string[i + 1]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to unguage later + if i != j - 1: + blob.multiply_index_diagonal_(bix, s, location="front") + regauged.append((i + 1, bix, "front", s)) + + # move inwards along string, terminate if two ends meet + i += 1 + if i == j: + inner_ts[i] = blob + break + + # extract at end of string + lix = bonds(blob, outer_ts[j]) + if j == len(string) - 1: + lix.add(site_ix[-1]) + else: + lix.add(bonds_along[j]) + + # the original bond we are restoring + bix = bonds_along[j - 1] + + # split the blob! + lix = tuple(oset(blob.inds)-oset(lix)) + blob, *maybe_svals, inner_ts[j] = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[j - 1], string[j]) + info['singular_values', coo_pair] = s + + # regauge the blob but record so as to unguage later + if j != i + 1: + blob.multiply_index_diagonal_(bix, s, location="back") + regauged.append((j - 1, bix, "back", s)) + + # move inwards along string, terminate if two ends meet + j -= 1 + if j == i: + inner_ts[j] = blob + break + + for i, (tid, _) in enumerate(fermion_info): + if i==0: + fs.replace_tensor(work_site, inner_ts[i], tid=tid, virtual=True) + else: + fs.insert_tensor(work_site+i, inner_ts[i], tid=tid, virtual=True) + + new_ts = [ + tensor_contract(ts, tr, output_inds=to.inds, inplace=True) + for to, ts, tr in zip(original_ts, outer_ts, inner_ts) + ] + for i, bix, location, s in regauged: + idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] + snew = np.zeros_like(s.data) + snew[idx] = 1/s.data[idx] + snew = FlatFermionTensor(s.q_labels, s.shapes, snew, idxs=s.idxs) + t = new_ts[i] + t.multiply_index_diagonal_(bix, snew, location=location) + + for (tid, _), to, t in zip(fermion_info, original_ts, new_ts): + site = t.get_fermion_info()[1] + to.modify(data=t.data) + fs.replace_tensor(site, to, tid=tid, virtual=True) + + fs._reorder_from_dict(dict(fermion_info)) class FermionTensorNetwork2D(FermionTensorNetwork,TensorNetwork2D): @@ -496,10 +749,18 @@ def rand(cls, Lx, Ly, bond_dim, phys_dim=2, return cls(arrays, **peps_opts) -class FermionTensorNetwork2DVector(TensorNetwork2DVector, - FermionTensorNetwork2D, - FermionTensorNetwork): +class FermionTensorNetwork2DVector(FermionTensorNetwork2D, + FermionTensorNetwork, + TensorNetwork2DVector): + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_site_ind_id', + ) def to_dense(self, *inds_seq, **contract_opts): raise NotImplementedError @@ -543,6 +804,8 @@ def gate( tags=None, inplace=False, info=None, + long_range_use_swaps=False, + long_range_path_sequence=None, **compress_opts ): check_opt("contract", contract, (False, True, 'split', 'reduce-split')) @@ -555,7 +818,6 @@ def gate( where = tuple(where) ng = len(where) - dp = psi.phys_dim(*where[0]) tags = tags_to_oset(tags) # allow a matrix to be reshaped into a tensor if it factorizes @@ -566,7 +828,7 @@ def gate( bnds = [rand_uuid() for _ in range(ng)] reindex_map = dict(zip(site_ix, bnds)) - TG = Tensor(G, inds=site_ix + bnds, tags=tags, left_inds=bnds) + TG = FermionTensor(G, inds=bnds + site_ix, tags=tags, left_inds=bnds) # [bnds first, then site_ix] if contract is False: # @@ 
-587,23 +849,47 @@ def gate( # ╱ ╱ # psi.reindex_(reindex_map) + input_tid, = psi._get_tids_from_inds(bnds, which='any') + isite = psi.tensor_map[input_tid].get_fermion_info()[1] + + #psi |= TG + psi.fermion_space.add_tensor(TG, virtual=True) # get the sites that used to have the physical indices site_tids = psi._get_tids_from_inds(bnds, which='any') # pop the sites, contract, then re-add - pts = [psi._pop_tensor(tid) for tid in site_tids] - psi |= tensor_contract(*pts, TG) - + pts = [psi._pop_tensor_(tid) for tid in site_tids] + out = tensor_contract(*pts, TG, inplace=True) + psi.fermion_space.move(out.get_fermion_info()[0], isite) + psi |= out return psi # following are all based on splitting tensors to maintain structure ij_a, ij_b = where - long_range_path_sequence = None - manual_lr_path = False + # parse the argument specifying how to find the path between + # non-nearest neighbours + if long_range_path_sequence is not None: + # make sure we can index + long_range_path_sequence = tuple(long_range_path_sequence) + # if the first element is a str specifying move sequence, e.g. + # ('v', 'h') + # ('av', 'bv', 'ah', 'bh') # using swaps + manual_lr_path = not isinstance(long_range_path_sequence[0], str) + # otherwise assume a path has been manually specified, e.g. + # ((1, 2), (2, 2), (2, 3), ... ) + # (((1, 1), (1, 2)), ((4, 3), (3, 3)), ...) # using swaps + else: + manual_lr_path = False + + psi.fermion_space.add_tensor(TG, virtual=True) + # check if we are not nearest neighbour and need to swap first + if long_range_use_swaps: + raise NotImplementedError + string = tuple(gen_long_range_path( - *where, sequence=long_range_path_sequence)) + *where, sequence=long_range_path_sequence)) # the tensors along this string, which will be updated original_ts = [psi[coo] for coo in string] @@ -634,9 +920,9 @@ def gate( gate_string_reduce_split_( TG, where, string, original_ts, bonds_along, reindex_map, site_ix, info, **compress_opts) - return psi + gate_ = functools.partialmethod(gate, inplace=True) def compute_norm( self, diff --git a/quimb/tensor/test/test_gate_2d.py b/quimb/tensor/test/test_gate_2d.py new file mode 100644 index 00000000..b1a5196c --- /dev/null +++ b/quimb/tensor/test/test_gate_2d.py @@ -0,0 +1,64 @@ +import numpy as np +from quimb.tensor.fermion_2d import FPEPS, FermionTensorNetwork2DVector +from pyblock3.algebra.symmetry import SZ, BondInfo +from pyblock3.algebra.fermion import (SparseFermionTensor, + FlatFermionTensor) +import time + +def compute_norm(psi, max_bond): + ket = psi.copy() + layer_tags=('KET', 'BRA') + + ket.add_tag(layer_tags[0]) + + bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) + bra.mangle_inner_("*") + + norm = bra & ket + + envs = norm._compute_plaquette_environments_col_first(x_bsz=1, y_bsz=1, layer_tags=layer_tags, max_bond=max_bond) + for key, val in envs.items(): + fs = val.fermion_space + ntsr = len(val.tensor_map) + for i in range(ntsr-1): + out = fs._contract_pairs(0,1) + print("Col:", key, out) + + envs = norm._compute_plaquette_environments_row_first(x_bsz=1, y_bsz=1, layer_tags=layer_tags, max_bond=max_bond) + for key, val in envs.items(): + fs = val.fermion_space + ntsr = len(val.tensor_map) + for i in range(ntsr-1): + out = fs._contract_pairs(0,1) + print("Row:",key, out) + +Lx = Ly = 4 +D = 2 + +np.random.seed(3) +infox = BondInfo({SZ(0,0,0):2, SZ(1,0,0): 2}) +G = SparseFermionTensor.random((infox,infox,infox,infox), dq=SZ(1)).to_flat() +TG = FlatFermionTensor.eye(infox) +psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) 
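# view the random FPEPS as a 2D vector so that gate() and the environment methods become available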
+psi.view_as_(FermionTensorNetwork2DVector, like=psi) + +max_bond=None +cutoff = 1e-10 + +site = ((0,0), (0,1)) + +t0 = time.time() +psi0 = psi.gate_(TG,((1,1)), contract=True, absorb=None, max_bond=max_bond, info=dict()) +t1 = time.time() +psi1 = psi.gate(G, site, contract="split", absorb=None, max_bond=max_bond, cutoff=cutoff, info=dict()) +t2 = time.time() +psi2 = psi.gate(G, site, contract="reduce-split", absorb=None, max_bond=max_bond, cutoff=cutoff, info=dict()) +t3 = time.time() +print(t1-t0, t2-t1, t3-t2) + +max_bond = 16 +print("chi=%i"%max_bond) +print("Checking split gate norm") +compute_norm(psi1, max_bond) +print("Checking reduce-split gate norm") +compute_norm(psi2, max_bond) From ca7f46381538d4a56892aeac17acb5cef1dddc23 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 19 Jan 2021 16:24:13 -0800 Subject: [PATCH 15/61] bugfix for definition of tensor order in contract/split/compress --- quimb/tensor/fermion.py | 33 +++++++-------- quimb/tensor/fermion_2d.py | 83 +++++++++++++++++++++++++++++--------- 2 files changed, 78 insertions(+), 38 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index ed5d8981..267489c9 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -12,7 +12,13 @@ def _contract_connected(tsr1, tsr2, out_inds=None): - ainds, binds = tsr1.inds, tsr2.inds + info1 = tsr1.get_fermion_info() + info2 = tsr2.get_fermion_info() + t1, t2 = tsr1, tsr2 + if info1 is not None and info2 is not None: + if info1[1] < info2[1]: + t1, t2 = tsr2, tsr1 + ainds, binds = t1.inds, t2.inds _output_inds = [] ax_a, ax_b = [], [] for kia, ia in enumerate(ainds): @@ -28,17 +34,9 @@ def _contract_connected(tsr1, tsr2, out_inds=None): if set(_output_inds) != set(out_inds): raise TypeError("specified out_inds not allowed in tensordot, \ make sure no summation/Hadamard product appears") - info1 = tsr1.get_fermion_info() - info2 = tsr2.get_fermion_info() - reverse_contract = False - if info1 is not None and info2 is not None: - if info1[1] > info2[1]: - reverse_contract=True - if reverse_contract: - out = np.tensordot(tsr2.data, tsr1.data, axes=[ax_b, ax_a]) - else: - out = np.tensordot(tsr1.data, tsr2.data, axes=[ax_a, ax_b]) + out = np.tensordot(t1.data, t2.data, axes=[ax_a, ax_b]) + if len(out_inds)==0: return out.data[0] @@ -227,16 +225,15 @@ def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=N def _compress_connected(Tl, Tr, absorb='both', **compress_opts): left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] + out = _contract_connected(Tl, Tr) if Tl.get_fermion_info()[1] < Tr.get_fermion_info()[1]: - out = _contract_connected(Tl, Tr) - l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) - else: - out = _contract_connected(Tr, Tl) if absorb == "left": absorb = "right" elif absorb == "right": absorb = "left" r, l = out.split(left_inds=right_inds, right_inds=left_inds, absorb=absorb, get="tensors", **compress_opts) + else: + l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) return l, r def tensor_compress_bond( @@ -268,11 +265,11 @@ def _canonize_connected(T1, T2, absorb='right', **split_opts): raise ValueError("The tensors specified don't share an bond.") if T1.get_fermion_info()[1] < T2.get_fermion_info()[1]: + tRfact, new_T1 = T1.split(shared_ix, get="tensors", **split_opts) + new_T2 = _contract_connected(T2, tRfact) + else: new_T1, tRfact = 
T1.split(left_env_ix, get='tensors', **split_opts) new_T2 = _contract_connected(tRfact, T2) - else: - tRfact, new_T1 = T1.split(shared_ix, get='tensors', **split_opts) - new_T2 = _contract_connected(T2, tRfact) if absorb == "left": return new_T2, new_T1 diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 02f11217..6b855784 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -60,7 +60,9 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, bix = bonds_along[i] # split the blob! - inner_ts[i], *maybe_svals, blob = blob.split( + + lix = tuple(oset(blob.inds)-oset(lix)) + blob, *maybe_svals, inner_ts[i] = blob.split( left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) # if singular values are returned (``absorb=None``) check if we should @@ -74,8 +76,8 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, # regauge the blob but record so as to unguage later if i != j - 1: - blob.multiply_index_diagonal_(bix, s, location="front") - regauged.append((i + 1, bix, "front", s)) + blob.multiply_index_diagonal_(bix, s, location="back") + regauged.append((i + 1, bix, "back", s)) # move inwards along string, terminate if two ends meet i += 1 @@ -92,8 +94,7 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, bix = bonds_along[j - 1] # split the blob! - lix = tuple(oset(blob.inds)-oset(lix)) - blob, *maybe_svals, inner_ts[j] = blob.split( + inner_ts[j], *maybe_svals, blob= blob.split( left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) # if singular values are returned (``absorb=None``) check if we should @@ -105,8 +106,8 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, # regauge the blob but record so as to ungauge later if j != i + 1: - blob.multiply_index_diagonal_(bix, s, location="back") - regauged.append((j - 1, bix, "back", s)) + blob.multiply_index_diagonal_(bix, s, location="front") + regauged.append((j - 1, bix, "front", s)) # move inwards along string, terminate if two ends meet j -= 1 @@ -123,9 +124,8 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, t.multiply_index_diagonal_(bix, snew, location=location) for to, tn in zip(original_ts, inner_ts): - x1 = tn.inds tn.transpose_like_(to) - to.modify(data=tn.data) + to.modify(data=tn.data, inds=tn.inds) for i, (tid, _) in enumerate(fermion_info): if i==0: @@ -159,8 +159,8 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, for tq, tr, t in zip(outer_ts, inner_ts, original_ts): isite = t.get_fermion_info()[1] - fs.replace_tensor(isite, tq, virtual=True) - fs.insert_tensor(isite+1, tr, virtual=True) + fs.replace_tensor(isite, tr, virtual=True) + fs.insert_tensor(isite+1, tq, virtual=True) blob = tensor_contract(*inner_ts, TG, inplace=True) work_site = blob.get_fermion_info()[1] @@ -183,7 +183,8 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, bix = bonds_along[i] # split the blob! 
- inner_ts[i], *maybe_svals, blob = blob.split( + lix = tuple(oset(blob.inds)-oset(lix)) + blob, *maybe_svals, inner_ts[i] = blob.split( left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) # if singular values are returned (``absorb=None``) check if we should @@ -195,8 +196,8 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, # regauge the blob but record so as to unguage later if i != j - 1: - blob.multiply_index_diagonal_(bix, s, location="front") - regauged.append((i + 1, bix, "front", s)) + blob.multiply_index_diagonal_(bix, s, location="back") + regauged.append((i + 1, bix, "back", s)) # move inwards along string, terminate if two ends meet i += 1 @@ -215,10 +216,8 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, bix = bonds_along[j - 1] # split the blob! - lix = tuple(oset(blob.inds)-oset(lix)) - blob, *maybe_svals, inner_ts[j] = blob.split( + inner_ts[j], *maybe_svals, blob = blob.split( left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) - # if singular values are returned (``absorb=None``) check if we should # return them via ``info``, e.g. for ``SimpleUpdate` if maybe_svals and info is not None: @@ -228,8 +227,8 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, # regauge the blob but record so as to unguage later if j != i + 1: - blob.multiply_index_diagonal_(bix, s, location="back") - regauged.append((j - 1, bix, "back", s)) + blob.multiply_index_diagonal_(bix, s, location="front") + regauged.append((j - 1, bix, "front", s)) # move inwards along string, terminate if two ends meet j -= 1 @@ -828,7 +827,7 @@ def gate( bnds = [rand_uuid() for _ in range(ng)] reindex_map = dict(zip(site_ix, bnds)) - TG = FermionTensor(G, inds=bnds + site_ix, tags=tags, left_inds=bnds) # [bnds first, then site_ix] + TG = FermionTensor(G.copy(), inds=site_ix+bnds, tags=tags, left_inds=site_ix) # [bnds first, then site_ix] if contract is False: # @@ -957,3 +956,47 @@ def normalize( **boundary_contract_opts, ): raise NotImplementedError + +def _gen_site_wfn_tsr(state, ndim=2, ax=0): + from pyblock3.algebra.core import SubTensor + from pyblock3.algebra.fermion import SparseFermionTensor + from pyblock3.algebra.symmetry import SZ + state_map = {0:(0,0), 1:(1,0), 2:(1,1), 3:(0,1)} + if state not in state_map: + raise KeyError("requested state not recoginized") + q_lab, ind = state_map[state] + q_label = [SZ(0),] * ax + [SZ(q_lab),] + [SZ(0),] *(ndim-ax-1) + shape = [1,] * ax + [2,] + [1,] *(ndim-ax-1) + dat = np.zeros([2]) + dat.put(ind, 1) + dat = dat.reshape(shape) + blocks = [SubTensor(reduced=dat, q_labels=q_label)] + smat = SparseFermionTensor(blocks=blocks).to_flat() + return smat + +def gen_mf_peps(state_array, shape='urdlp', **kwargs): + Lx, Ly = state_array.shape + arr = state_array.astype("int") + cache = dict() + def _gen_ij(i, j): + state = arr[i, j] + array_order = shape + if i == Lx - 1: + array_order = array_order.replace('u', '') + if j == Ly - 1: + array_order = array_order.replace('r', '') + if i == 0: + array_order = array_order.replace('d', '') + if j == 0: + array_order = array_order.replace('l', '') + + ndim = len(array_order) + ax = array_order.index('p') + key = (state, ndim, ax) + if key not in cache: + cache[key] = _gen_site_wfn_tsr(state, ndim, ax).copy() + return cache[key] + + tsr_array = [[_gen_ij(i,j) for j in range(Ly)] for i in range(Lx)] + + return FPEPS(tsr_array, shape=shape, **kwargs) From 2f604b85626021ccc7441b0552b5b20fb778bcd8 Mon Sep 17 00:00:00 2001 From: yangcal 
Date: Tue, 19 Jan 2021 16:56:01 -0800 Subject: [PATCH 16/61] add fermion operators --- quimb/tensor/fermion_ops.py | 63 ++++++++++++++++++++++++++++++++ quimb/tensor/test/test_ops.py | 68 +++++++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 quimb/tensor/fermion_ops.py create mode 100644 quimb/tensor/test/test_ops.py diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py new file mode 100644 index 00000000..a27a1316 --- /dev/null +++ b/quimb/tensor/fermion_ops.py @@ -0,0 +1,63 @@ +import numpy as np +from itertools import product +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor +from pyblock3.algebra.symmetry import SZ, BondInfo +from .fermion_2d import FPEPS,FermionTensorNetwork2DVector + + +def ham_eye(const=1.): + seven = SZ(0) + sodd = SZ(1) + info = BondInfo({seven:2, sodd:2}) + return FlatFermionTensor.eye(info) + +def gen_h1(h=1.): + blocks= [] + for i, j in product(range(2), repeat=2): + qlab = (SZ(i), SZ(j), SZ(1-i), SZ(1-j)) + qlst = [q.n for q in qlab] + iblk = np.zeros([2,2,2,2]) + blocks.append(SubTensor(reduced=np.zeros([2,2,2,2]), q_labels=(SZ(i), SZ(j), SZ(i), SZ(j)))) + if (i+j)==1: + iblk[0,0,0,0] = iblk[i,j,j,i] = h + iblk[1,1,1,1] = iblk[j,i,i,j] = -h + else: + if i == 0: + iblk[0,1,0,1] = iblk[1,0,0,1] = h + iblk[1,0,1,0] = iblk[0,1,1,0] = -h + else: + iblk[0,1,0,1] = iblk[0,1,1,0] = -h + iblk[1,0,1,0] = iblk[1,0,0,1] = h + blocks.append(SubTensor(reduced=iblk, q_labels=qlab)) + hop = SparseFermionTensor(blocks=blocks).to_flat() + return hop + +hopping = lambda t=1.0: gen_h1(-t) + +def onsite_u(u=1): + umat0 = np.zeros([2,2]) + umat0[1,1] = u + umat1 = np.zeros([2,2]) + blocks = [SubTensor(reduced=umat0, q_labels=(SZ(0), SZ(0))), + SubTensor(reduced=umat1, q_labels=(SZ(1), SZ(1)))] + umat = SparseFermionTensor(blocks=blocks).to_flat() + return umat + +def count_n(): + nmat0 = np.zeros([2,2]) + nmat0[1,1] = 2 + nmat1 = np.eye(2) + blocks = [SubTensor(reduced=nmat0, q_labels=(SZ(0), SZ(0))), + SubTensor(reduced=nmat1, q_labels=(SZ(1), SZ(1)))] + nmat = SparseFermionTensor(blocks=blocks).to_flat() + return nmat + +def measure_sz(): + zmat0 = np.zeros([2,2]) + zmat1 = np.eye(2) * .5 + zmat1[1,1]= -.5 + blocks = [SubTensor(reduced=zmat0, q_labels=(SZ(0), SZ(0))), + SubTensor(reduced=zmat1, q_labels=(SZ(1), SZ(1)))] + smat = SparseFermionTensor(blocks=blocks).to_flat() + return smat diff --git a/quimb/tensor/test/test_ops.py b/quimb/tensor/test/test_ops.py new file mode 100644 index 00000000..9a69381f --- /dev/null +++ b/quimb/tensor/test/test_ops.py @@ -0,0 +1,68 @@ +import numpy as np +from quimb.tensor import fermion_ops as ops +from quimb.tensor.fermion_2d import gen_mf_peps, FermionTensorNetwork2DVector +from quimb.tensor.fermion import tensor_contract +from pyblock3.algebra.symmetry import SZ +from itertools import product + + +def get_state(out): + vecmap = {(SZ(0), 0): "0,", + (SZ(0), 1): "-+,", + (SZ(1), 0): "+,", + (SZ(1), 1): "-,"} + outstring = "" + for iblk in out.blocks: + data = np.asarray(iblk) + inds = np.where(abs(data)>0.) 
+ for ia, ib in zip(*inds): + key1 = (iblk.q_labels[0], ia) + key2 = (iblk.q_labels[1], ib) + val = data[ia, ib] + outstring += "+ %.1f|"%(val) + vecmap[key1] + vecmap[key2].replace(',','> ') + + if outstring=="": + outstring= "|0>" + return outstring + +max_bond=4 +Lx, Ly = 1,2 + +state_array = np.random.randint(0,4,[Lx,Ly]) + +def test_hopping(ix, iy): + state_array = np.asarray([[ix,iy]]) + psi = gen_mf_peps(state_array, tags=("KET")) + psi.view_as_(FermionTensorNetwork2DVector, like=psi) + umat = ops.onsite_u(4) + nmat = ops.count_n() + zmat = ops.measure_sz() + tmat = ops.gen_h1(1) + + instate = tensor_contract(psi[0,0], psi[0,1]) + + psi1 = psi.gate(tmat.copy(), ((0,0), (0,1)), contract='split') + + outstate = tensor_contract(psi1[0,0], psi1[0,1]) + instring = get_state(instate.data.to_sparse()) + outstring = get_state(outstate.data.to_sparse()) + print("Input:", instring) + print("Output 1:", outstring) + + state = np.tensordot(psi[0,1].data, psi[0,0].data, axes=((0,),(0,))) + outstate = np.tensordot(tmat, state, axes=((2,3),(1,0))).transpose([1,0]) + print("Output 2:",get_state(outstate.to_sparse())) + + outstate = np.tensordot(tmat, state, axes=((2,3),(0,1))) + print("Output 3:",get_state(outstate.to_sparse())) + + psi1 = psi.gate(tmat.copy(), ((0,1), (0,0)), contract='reduce-split') + outstate = tensor_contract(psi1[0,0], psi1[0,1]) + outstring = get_state(outstate.data.to_sparse()) + + print("Output 4:", outstring) + +for ix, iy in product(range(4), repeat=2): + if ix==iy: continue + print("testing %i %i"%(ix, iy)) + test_hopping(ix, iy) From 87f9331def7a5f51fca2fee4a7522eeb422e09c0 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 20 Jan 2021 12:21:40 -0800 Subject: [PATCH 17/61] bug fix in dense boundary contraction --- quimb/tensor/fermion_2d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 6b855784..4cf5d930 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -300,9 +300,9 @@ def compute_row_environments(self, dense=False, **compress_opts): # upwards pass row_envs['below', 0] = FermionTensorNetwork([]) first_row = self.row_tag(0) + row_envs['mid', 0] = env_bottom.select(first_row).simple_copy() if dense: env_bottom ^= first_row - row_envs['mid', 0] = env_bottom.select(first_row).simple_copy() row_envs['below', 1] = env_bottom.select(first_row).simple_copy() for i in range(2, env_bottom.Lx): below_row = env_bottom.row_tag(i-1) @@ -342,9 +342,9 @@ def compute_col_environments(self, dense=False, **compress_opts): # upwards pass col_envs['left', 0] = FermionTensorNetwork([]) first_col = self.col_tag(0) + col_envs['mid', 0] = env_left.select(first_col).simple_copy() if dense: env_left ^= first_col - col_envs['mid', 0] = env_left.select(first_col).simple_copy() col_envs['left', 1] = env_left.select(first_col).simple_copy() for i in range(2, env_left.Ly): From 88705f53d2af61d56f223d8576c0ee012059f91c Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 20 Jan 2021 15:33:56 -0800 Subject: [PATCH 18/61] add tn.contract method; introduce compute_norm in 2d ftns --- quimb/tensor/fermion.py | 19 +++++++++++++++++-- quimb/tensor/fermion_2d.py | 11 ----------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 267489c9..fd7f6e6d 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -125,7 +125,7 @@ def _fetch_fermion_space(*tensors, inplace=True): tid_lst = list(fs.tensor_order.keys()) return fs, 
tid_lst -def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False): +def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False, **contract_opts): """ Perform tensor contractions for all the given tensors. If input tensors do not belong to the same underlying fsobj, the position of each tensor will be the same as its order in the input tensor tuple/list. @@ -145,7 +145,7 @@ def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False) ------- out : a FermionTensor object or a number """ - path_info = _tensor_contract(*tensors, get='path-info') + path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) fs, tid_lst = _fetch_fermion_space(*tensors, inplace=inplace) for conc in path_info.contraction_list: pos1, pos2 = conc[0] @@ -223,6 +223,8 @@ def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=N return FermionTensorNetwork(tensors, check_collisions=False, virtual=True) def _compress_connected(Tl, Tr, absorb='both', **compress_opts): + if Tl.inds == Tr.inds: + return Tl, Tr left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] out = _contract_connected(Tl, Tr) @@ -1469,6 +1471,19 @@ def contract_tags(self, tags, inplace=False, which='any', **opts): untagged_tn.add_tensor(contracted, virtual=True) return untagged_tn + def contract(self, tags=..., inplace=False, **opts): + + if tags is all: + return tensor_contract(*self, **opts) + + # this checks whether certain TN classes have a manually specified + # contraction pattern (e.g. 1D along the line) + if self._CONTRACT_STRUCTURED: + raise NotImplementedError("structured contraction not implemented") + + # else just contract those tensors specified by tags. + return self.contract_tags(tags, inplace=inplace, **opts) + def _compress_between_tids( self, tid1, diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 4cf5d930..2d61a695 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -923,17 +923,6 @@ def gate( gate_ = functools.partialmethod(gate, inplace=True) - def compute_norm( - self, - layer_tags=('KET', 'BRA'), - **contract_opts, - ): - """Compute the norm of this vector via boundary contraction. 
- """ - raise NotImplementedError - norm = self.make_norm(layer_tags=layer_tags) - return norm.contract_boundary(layer_tags=layer_tags, **contract_opts) - def compute_local_expectation( self, terms, From ec96e26148809c5b1ab04fa4af956f33f12a3a9b Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 26 Jan 2021 12:28:21 -0800 Subject: [PATCH 19/61] fermion operators exponential added; operator tests added --- quimb/tensor/fermion_ops.py | 47 ++++++++- quimb/tensor/test/test_env_ops.py | 158 ++++++++++++++++++++++++++++++ quimb/tensor/test/test_exp.py | 132 +++++++++++++++++++++++++ quimb/tensor/test/test_norm_2d.py | 72 ++++++++++++++ 4 files changed, 408 insertions(+), 1 deletion(-) create mode 100644 quimb/tensor/test/test_env_ops.py create mode 100644 quimb/tensor/test/test_exp.py create mode 100644 quimb/tensor/test/test_norm_2d.py diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py index a27a1316..0ad13a78 100644 --- a/quimb/tensor/fermion_ops.py +++ b/quimb/tensor/fermion_ops.py @@ -1,10 +1,55 @@ import numpy as np from itertools import product from pyblock3.algebra.core import SubTensor -from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor +from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor, _pack_flat_tensor, _unpack_flat_tensor from pyblock3.algebra.symmetry import SZ, BondInfo from .fermion_2d import FPEPS,FermionTensorNetwork2DVector +def to_exp(tsr, x): + ndim = tsr.ndim + if tsr.parity == 1: + raise ValueError("expontial of odd parity tensor not defined") + if np.mod(ndim, 2) !=0: + raise ValueError("dimension of the tensor must be even (%i)"%ndim) + ax = ndim //2 + data = [] + udata, sdata, vdata = [],[],[] + uq,sq,vq= [],[],[] + ushapes, vshapes, sshapes = [],[],[] + sz_labels = ((SZ(0),SZ(0)), (SZ(1), SZ(1))) + if ndim == 2: + parity_axes = None + else: + parity_axes = list(range(ax)) + + for szlab in sz_labels: + data, row_map, col_map = _pack_flat_tensor(tsr, szlab, ax, parity_axes) + el, ev = np.linalg.eig(data) + s = np.diag(np.exp(-el*x)) + _unpack_flat_tensor(ev, row_map, 0, udata, uq, ushapes, parity_axes) + _unpack_flat_tensor(ev.conj().T, col_map, 1, vdata, vq, vshapes) + sq.append([SZ.to_flat(iq) for iq in szlab]) + sshapes.append(s.shape) + sdata.append(s.ravel()) + + sq = np.asarray(sq, dtype=np.uint32) + sshapes = np.asarray(sshapes, dtype=np.uint32) + sdata = np.concatenate(sdata) + s = FlatFermionTensor(sq, sshapes, sdata) + + uq = np.asarray(uq, dtype=np.uint32) + ushapes = np.asarray(ushapes, dtype=np.uint32) + udata = np.concatenate(udata) + + vq = np.asarray(vq, dtype=np.uint32) + vshapes = np.asarray(vshapes, dtype=np.uint32) + vdata = np.concatenate(vdata) + u = FlatFermionTensor(uq, ushapes, udata) + v = FlatFermionTensor(vq, vshapes, vdata) + + out = np.tensordot(u, s, axes=((-1,),(0,))) + out = np.tensordot(out, v, axes=((-1,),(0,))) + return out def ham_eye(const=1.): seven = SZ(0) diff --git a/quimb/tensor/test/test_env_ops.py b/quimb/tensor/test/test_env_ops.py new file mode 100644 index 00000000..3213faa0 --- /dev/null +++ b/quimb/tensor/test/test_env_ops.py @@ -0,0 +1,158 @@ +import quimb as qu +import numpy as np +from quimb.tensor.tensor_core import rand_uuid +from quimb.tensor.tensor_2d import is_lone_coo +from quimb.tensor.fermion_2d import FermionTensorNetwork2DVector, gen_mf_peps, FPEPS +from quimb.tensor.fermion import FermionTensorNetwork, FermionTensor, tensor_contract, FermionSpace +from quimb.tensor import fermion_ops as ops +import itertools + +def compute_env(psi, 
max_bond, bra=None, x_bsz=1, y_bsz=1): + ket = psi.copy() + layer_tags=('KET', 'BRA') + + ket.add_tag(layer_tags[0]) + if bra is None: bra = ket.H + bra = bra.retag_({layer_tags[0]: layer_tags[1]}) + bra.mangle_inner_("*") + + norm = ket & bra + envs = norm._compute_plaquette_environments_row_first(x_bsz=x_bsz, y_bsz=y_bsz, layer_tags=layer_tags, max_bond=max_bond) + return envs + +def compute_expectation(env, psi, op, where, max_bond): + ket = psi.copy() + layer_tags=('KET', 'BRA') + + ket.add_tag(layer_tags[0]) + bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) + bra.mangle_inner_("*") + + if is_lone_coo(where): + where = (where,) + else: + where = tuple(where) + + ng = len(where) + site_ix = [bra.site_ind(i, j) for i, j in where] + bnds = [rand_uuid() for _ in range(ng)] + reindex_map = dict(zip(site_ix, bnds)) + TG = FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) + newTG = bra.fermion_space.move_past(TG, (0, len(bra.fermion_space.tensor_order))) + if ng==1: + id = where + ((1,1),) + else: + x_bsz = abs(where[1][0] - where[0][0]) + 1 + y_bsz = abs(where[1][1] - where[0][1]) + 1 + id = (where[0], ) + ((x_bsz, y_bsz),) + if id not in env.keys(): + id = (where[1], ) + ((x_bsz, y_bsz),) + if id not in env.keys(): + raise KeyError("env does not fit with operator") + tn = env[id].copy() + fs = tn.fermion_space + ntsr = len(fs.tensor_order) + for i in range(ntsr-2*ng, ntsr): + tsr = fs[i][2] + if layer_tags[0] in tsr.tags: + tsr.reindex_(reindex_map) + tn.add_tensor(newTG, virtual=True) + out = tn.contract(all, optimize='auto-hq') + return out + +def contract_raw(psi, op, where): + ket = psi.copy() + layer_tags=('KET', 'BRA') + + ket.add_tag(layer_tags[0]) + bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) + bra.mangle_inner_("*") + + if is_lone_coo(where): + where = (where,) + else: + where = tuple(where) + + ng = len(where) + site_ix = [bra.site_ind(i, j) for i, j in where] + bnds = [rand_uuid() for _ in range(ng)] + reindex_map = dict(zip(site_ix, bnds)) + TG = FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) + ket.reindex_(reindex_map) + tn = ket & TG & bra + out = tn.contract(all, optimize='auto-hq') + return out + +def contract_gate(psi, op, where): + ket = psi.copy() + layer_tags=('KET', 'BRA') + + ket.add_tag(layer_tags[0]) + bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) + bra.mangle_inner_("*") + newket = ket.gate(op, where) + tn = newket & bra + out = tn.contract(all, optimize='auto-hq') + return out + +Lx = Ly = 4 +max_bond = 8 +state_array = np.random.randint(0,4, [Lx, Ly]) +psi = gen_mf_peps(state_array) +psi.view_as_(FermionTensorNetwork2DVector, like=psi) + +U = 4. +t = 2. 
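# operators from fermion_ops (added earlier in this series): on-site repulsion U, particle number, Sz measurement, and nearest-neighbour hopping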
+uop = ops.onsite_u(U) +nop = ops.count_n() +sz = ops.measure_sz() +hop = ops.hopping(t) + +env = compute_env(psi, max_bond) + +print("testing U") +for ix in range(Lx): + for iy in range(Ly): + where = (ix, iy) + out = compute_expectation(env, psi, uop, where, max_bond) + if state_array[ix,iy]==3: + print(U==out) + else: + print(0.==out) + +print("testing N") +for ix in range(Lx): + for iy in range(Ly): + where = (ix, iy) + out = compute_expectation(env, psi, nop, where, max_bond) + if state_array[ix,iy] ==0: + print(0.==out) + elif state_array[ix, iy] in [1, 2]: + print(1.==out) + else: + print(2.==out) + +print("testing sz") +for ix in range(Lx): + for iy in range(Ly): + where = (ix, iy) + out = compute_expectation(env, psi, sz, where, max_bond) + if state_array[ix,iy] in [0,3]: + print(0.==out) + elif state_array[ix, iy] ==1: + print(.5==out) + else: + print(-.5==out) + +print("testing hopping") + +Lx = Ly = 3 +psi = FPEPS.rand(Lx, Ly, 1, seed=33) + +psi.view_as_(FermionTensorNetwork2DVector, like=psi) +where = ((1,1),(1,2)) +out = contract_raw(psi, hop, where) +out1 = contract_gate(psi, hop, where) +env = compute_env(psi, max_bond, y_bsz=2) +out2 = compute_expectation(env, psi, hop, where, max_bond) +print(out, out1, out2) diff --git a/quimb/tensor/test/test_exp.py b/quimb/tensor/test/test_exp.py new file mode 100644 index 00000000..65b040e5 --- /dev/null +++ b/quimb/tensor/test/test_exp.py @@ -0,0 +1,132 @@ +import numpy as np +from quimb.tensor import fermion_ops as ops +from pyblock3.algebra.fermion import _pack_flat_tensor, SparseFermionTensor, _unpack_flat_tensor, FlatFermionTensor +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.symmetry import SZ + +t = 2 +hop = ops.hopping(t) + +def get_state(out): + vecmap = {(SZ(0), 0): "0,", + (SZ(0), 1): "-+,", + (SZ(1), 0): "+,", + (SZ(1), 1): "-,"} + outstring = "" + coeff = 0 + for iblk in out.blocks: + data = np.asarray(iblk) + inds = np.where(abs(data)>0.) 
+ for ia, ib in zip(*inds): + key1 = (iblk.q_labels[0], ia) + key2 = (iblk.q_labels[1], ib) + val = data[ia, ib] + outstring += "+ %.4f|"%(val) + vecmap[key1] + vecmap[key2].replace(',','> ') + if vecmap[key1]+vecmap[key2] == "+,-,": + coeff = val + + if outstring=="": + outstring= "|0>" + return outstring + + +def get_err(out, out1): + nblk = len(out.q_labels) + err = [] + for i in range(nblk): + dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) + j = np.where(dlt==0)[0][0] + ist, ied = out.idxs[i], out.idxs[i+1] + jst, jed = out1.idxs[j], out1.idxs[j+1] + err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) + return max(err) + +tau = 0.1 +tsr = ops.to_exp(hop, tau) + +sx = SZ(0) +sy = SZ(1) + +blocks=[] +states = np.zeros([2,2]) +states[0,0] = 2**(-.5) +blocks.append(SubTensor(reduced=states, q_labels=(sx, sy))) +blocks.append(SubTensor(reduced=-states, q_labels=(sy, sx))) +# 2**.5 |0+> -2**.5|+0>, eigenstate of hopping(t) + +eval = t +instate = SparseFermionTensor(blocks=blocks) +instring = get_state(instate) +print("Input: ", instring) +outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() +outstate = np.tensordot(tsr, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() +outstring0 = get_state(outstate0) +outstring = get_state(outstate) +print("Output0:", outstring0) +print("Output:", outstring) +print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*2**(-.5))) + +eval = -t +blocks=[] +states = np.zeros([2,2]) +states[0] = .5 +blocks.append(SubTensor(reduced=states, q_labels=(sx, sy))) #0+, 0- +blocks.append(SubTensor(reduced=states.T, q_labels=(sy, sx))) #+0, -0, eigenstate of hopping + +# .5 |0+> + .5 |0-> + .5 |+0> + .5 |-0>, eigenstate of hopping(-t) + +instate = SparseFermionTensor(blocks=blocks) +instring = get_state(instate) +print("Input: ", instring) +outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() +outstate = np.tensordot(tsr, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() +outstring0 = get_state(outstate0) +outstring = get_state(outstate) +print("Output0:", outstring0) +print("Output:", outstring) +print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*(.5))) + + + + +eval = -2*t +blocks=[] +states = np.zeros([2,2]) +states[1,0] = states[0,1] = .5 +blocks.append(SubTensor(reduced=states, q_labels=(sx, sx))) +states = np.zeros([2,2]) +states[1,0] = .5 +states[0,1] =-.5 +blocks.append(SubTensor(reduced=states, q_labels=(sy, sy))) +instate = SparseFermionTensor(blocks=blocks) +# .5 |0,-+> + .5 |-+,0> + .5 |-,+> - .5|+,->, eigenstate (-2) +instring = get_state(instate) +print("Input: ", instring) +outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() +outstate = np.tensordot(tsr, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() +outstring0 = get_state(outstate0) +outstring = get_state(outstate) +print("Output0:", outstring0) +print("Output:", outstring) +print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*(.5))) + +eval= 2*t +blocks=[] +states = np.zeros([2,2]) +states[1,0] = states[0,1] = .5 +blocks.append(SubTensor(reduced=states, q_labels=(sx, sx))) +states = np.zeros([2,2]) +states[1,0] =-.5 +states[0,1] =.5 +blocks.append(SubTensor(reduced=states, q_labels=(sy, sy))) +instate = SparseFermionTensor(blocks=blocks) +# .5 |0,-+> + .5 |-+,0> - .5 |-,+> + .5|+,->, eigenstate (2) +instring = get_state(instate) +print("Input: ", instring) +outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() +outstate = np.tensordot(tsr, instate.to_flat(), 
axes=((2,3),(0,1))).to_sparse() +outstring0 = get_state(outstate0) +outstring = get_state(outstate) +print("Output0:", outstring0) +print("Output:", outstring) +print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*(.5))) diff --git a/quimb/tensor/test/test_norm_2d.py b/quimb/tensor/test/test_norm_2d.py new file mode 100644 index 00000000..c29eb207 --- /dev/null +++ b/quimb/tensor/test/test_norm_2d.py @@ -0,0 +1,72 @@ +import quimb as qu +import numpy as np +from quimb.tensor.tensor_core import rand_uuid +from quimb.tensor.tensor_2d import is_lone_coo +from quimb.tensor.fermion_2d import FermionTensorNetwork2DVector, gen_mf_peps +from quimb.tensor.fermion import FermionTensorNetwork, FermionTensor, tensor_contract, FermionSpace +from quimb.tensor import fermion_ops as ops +import itertools + +def compute_env(psi, max_bond, bra=None, x_bsz=1, y_bsz=1): + ket = psi.copy() + layer_tags=('KET', 'BRA') + + ket.add_tag(layer_tags[0]) + if bra is None: bra = ket.H + bra = bra.retag_({layer_tags[0]: layer_tags[1]}) + bra.mangle_inner_("*") + + norm = ket & bra + envs = norm._compute_plaquette_environments_row_first(x_bsz=x_bsz, y_bsz=y_bsz, layer_tags=layer_tags, max_bond=max_bond) + return envs + + +def compute_expectation(env, psi, op, where, max_bond): + ket = psi.copy() + layer_tags=('KET', 'BRA') + + ket.add_tag(layer_tags[0]) + bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) + bra.mangle_inner_("*") + + if is_lone_coo(where): + where = (where,) + else: + where = tuple(where) + + ng = len(where) + site_ix = [bra.site_ind(i, j) for i, j in where] + bnds = [rand_uuid() for _ in range(ng)] + reindex_map = dict(zip(site_ix, bnds)) + + TG = FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) + newTG = bra.fermion_space.move_past(TG, (0, len(bra.fermion_space.tensor_order))) + if ng==1: + tn = env[where+((1,1),)].copy() + fs = tn.fermion_space + ntsr = len(fs.tensor_order) + for i in range(ntsr-2*ng, ntsr): + tsr = fs[i][2] + if layer_tags[0] in tsr.tags: + tsr.reindex_(reindex_map) + tn.add_tensor(newTG, virtual=True) + out = tn.contract(all, optimize='auto-hq') + return out + + + +Lx = Ly = 6 +max_bond = 8 +state_array = np.random.randint(0,4, [Lx, Ly]) +psi = gen_mf_peps(state_array) +psi.view_as_(FermionTensorNetwork2DVector, like=psi) + +U = 4. +t = 2. 
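# same operators as in test_env_ops.py; only psi.compute_norm() is exercised below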
+uop = ops.onsite_u(U) +nop = ops.count_n() +sz = ops.measure_sz() +hop = ops.hopping(t) + +out = psi.compute_norm(max_bond=max_bond) +print(out) From 212032ff3b4d217579e562f452583995ddc63850 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 26 Jan 2021 14:44:39 -0800 Subject: [PATCH 20/61] 2d.compute_local_expectation added --- quimb/tensor/fermion.py | 6 +- quimb/tensor/fermion_2d.py | 106 +++++++++++++++++++++++++++++- quimb/tensor/test/test_env_ops.py | 28 ++++++-- 3 files changed, 129 insertions(+), 11 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index fd7f6e6d..551fc740 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -487,7 +487,9 @@ def move(self, tid_or_site, des_site): if len(axes)>0: tsr.data._local_flip(axes) self.tensor_order[tid] = (tsr, des_site) - def move_past(self, tsr, site_range): + def move_past(self, tsr, site_range=None): + if site_range is None: + site_range = (0, len(self.tensor_order)) start, end = site_range iterator = range(start, end) shared_inds = [] @@ -1474,7 +1476,7 @@ def contract_tags(self, tags, inplace=False, which='any', **opts): def contract(self, tags=..., inplace=False, **opts): if tags is all: - return tensor_contract(*self, **opts) + return tensor_contract(*self, inplace=inplace, **opts) # this checks whether certain TN classes have a manually specified # contraction pattern (e.g. 1D along the line) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 2d61a695..f943fd9e 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -8,22 +8,52 @@ TensorNetwork2DVector, PEPS, is_lone_coo, - gen_long_range_path) + gen_long_range_path, + plaquette_to_sites) from .tensor_core import ( rand_uuid, oset, tags_to_oset, bonds ) +from .tensor_2d import calc_plaquette_sizes as _calc_plaquette_sizes +from .tensor_2d import calc_plaquette_map as _calc_plaquette_map from ..utils import check_opt, pairwise from collections import defaultdict from itertools import product import numpy as np import functools +from operator import add from pyblock3.algebra.fermion import FlatFermionTensor INVERSE_CUTOFF = 1e-10 +def calc_plaquette_sizes(pairs, autogroup=False): + if autogroup: + raise NotImplementedError + singles = [] + remainders = [] + for pair in pairs: + singles.append(is_lone_coo(pair)) + if not is_lone_coo(pair): + remainders.append(pair) + singles = (sum(singles) != 0) + if singles: + sizes = ((1,1),) + else: + sizes = () + if len(remainders) >0: + sizes += _calc_plaquette_sizes(remainders, autogroup) + return sizes + +def calc_plaquette_map(plaquettes): + plaqs = [p for p in plaquettes if p[1][0] * p[1][1]>1] + map = _calc_plaquette_map(plaqs) + for p in plaquettes: + if p[1][0] * p[1][1]==1: + map[p[0]] = p + return map + def gate_string_split_(TG, where, string, original_ts, bonds_along, reindex_map, site_ix, info, **compress_opts): # by default this means singuvalues are kept in the string 'blob' tensor @@ -927,15 +957,85 @@ def compute_local_expectation( self, terms, normalized=False, - autogroup=True, + autogroup=False, contract_optimize='auto-hq', return_all=False, + layer_tags=('KET', 'BRA'), plaquette_envs=None, plaquette_map=None, **plaquette_env_options, ): + norm, ket, bra = self.make_norm(return_all=True, layer_tags=layer_tags) + new_terms = dict() + for where, op in terms.items(): + if is_lone_coo(where): + _where = (where,) + else: + _where = tuple(where) + ng = len(_where) + site_ix = [bra.site_ind(i, j) for i, j in _where] + bnds = [rand_uuid() for _ in 
range(ng)] + TG = FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) + new_terms[where] = bra.fermion_space.move_past(TG).data + + if plaquette_envs is None: + # set some sensible defaults + plaquette_env_options.setdefault('layer_tags', ('KET', 'BRA')) + + plaquette_envs = dict() + for x_bsz, y_bsz in calc_plaquette_sizes(terms, autogroup): + plaquette_envs.update(norm.compute_plaquette_environments( + x_bsz=x_bsz, y_bsz=y_bsz, **plaquette_env_options)) + + if plaquette_map is None: + # work out which plaquettes to use for which terms + plaquette_map = calc_plaquette_map(plaquette_envs) + + # now group the terms into just the plaquettes we need + plaq2coo = defaultdict(list) + for where, G in new_terms.items(): + p = plaquette_map[where] + plaq2coo[p].append((where, G)) + + expecs = dict() + for p in plaq2coo: + # site tags for the plaquette + # view the ket portion as 2d vector so we can gate it + tn = plaquette_envs[p] + if normalized: + norm_i0j0 = tn.contract(all, optimize=contract_optimize) + else: + norm_i0j0 = None - raise NotImplementedError + for where, G in plaq2coo[p]: + newtn = tn.copy() + if is_lone_coo(where): + _where = (where,) + else: + _where = tuple(where) + ng = len(_where) + site_ix = [bra.site_ind(i, j) for i, j in _where] + bnds = [rand_uuid() for _ in range(ng)] + reindex_map = dict(zip(site_ix, bnds)) + TG = FermionTensor(G.copy(), inds=site_ix+bnds, left_inds=site_ix) + ntsr = len(newtn.tensor_map) + fs = newtn.fermion_space + ng = len(where) + for i in range(ntsr-2*ng, ntsr): + tsr = fs[i][2] + if layer_tags[0] in tsr.tags: + tsr.reindex_(reindex_map) + newtn.add_tensor(TG, virtual=True) + expec_ij = newtn.contract(all, optimize=contract_optimize) + expecs[where] = expec_ij, norm_i0j0 + + if return_all: + return expecs + + if normalized: + return functools.reduce(add, (e / n for e, n in expecs.values())) + + return functools.reduce(add, (e for e, _ in expecs.values())) def normalize( self, diff --git a/quimb/tensor/test/test_env_ops.py b/quimb/tensor/test/test_env_ops.py index 3213faa0..f7c24214 100644 --- a/quimb/tensor/test/test_env_ops.py +++ b/quimb/tensor/test/test_env_ops.py @@ -108,23 +108,33 @@ def contract_gate(psi, op, where): sz = ops.measure_sz() hop = ops.hopping(t) -env = compute_env(psi, max_bond) - print("testing U") +terms = dict() +for i in range(Lx): + for j in range(Ly): + terms[(i,j)] = uop +exps = psi.compute_local_expectation(terms, return_all=True) + for ix in range(Lx): for iy in range(Ly): where = (ix, iy) - out = compute_expectation(env, psi, uop, where, max_bond) + out = exps[where][0] if state_array[ix,iy]==3: print(U==out) else: print(0.==out) print("testing N") +terms = dict() +for i in range(Lx): + for j in range(Ly): + terms[(i,j)] = nop +exps = psi.compute_local_expectation(terms, return_all=True) + for ix in range(Lx): for iy in range(Ly): where = (ix, iy) - out = compute_expectation(env, psi, nop, where, max_bond) + out = exps[where][0] if state_array[ix,iy] ==0: print(0.==out) elif state_array[ix, iy] in [1, 2]: @@ -132,11 +142,17 @@ def contract_gate(psi, op, where): else: print(2.==out) -print("testing sz") +print("testing Sz") +terms = dict() +for i in range(Lx): + for j in range(Ly): + terms[(i,j)] = sz +exps = psi.compute_local_expectation(terms, return_all=True) + for ix in range(Lx): for iy in range(Ly): where = (ix, iy) - out = compute_expectation(env, psi, sz, where, max_bond) + out = exps[where][0] if state_array[ix,iy] in [0,3]: print(0.==out) elif state_array[ix, iy] ==1: From 
571c13c6df60a0d167aacc9e3dced4ea7f0caab4 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 26 Jan 2021 14:51:19 -0800 Subject: [PATCH 21/61] fix exponential sign --- quimb/tensor/fermion_ops.py | 2 +- quimb/tensor/test/test_exp.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py index 0ad13a78..ff06259f 100644 --- a/quimb/tensor/fermion_ops.py +++ b/quimb/tensor/fermion_ops.py @@ -25,7 +25,7 @@ def to_exp(tsr, x): for szlab in sz_labels: data, row_map, col_map = _pack_flat_tensor(tsr, szlab, ax, parity_axes) el, ev = np.linalg.eig(data) - s = np.diag(np.exp(-el*x)) + s = np.diag(np.exp(el*x)) _unpack_flat_tensor(ev, row_map, 0, udata, uq, ushapes, parity_axes) _unpack_flat_tensor(ev.conj().T, col_map, 1, vdata, vq, vshapes) sq.append([SZ.to_flat(iq) for iq in szlab]) diff --git a/quimb/tensor/test/test_exp.py b/quimb/tensor/test/test_exp.py index 65b040e5..4004bf1b 100644 --- a/quimb/tensor/test/test_exp.py +++ b/quimb/tensor/test/test_exp.py @@ -42,7 +42,7 @@ def get_err(out, out1): return max(err) tau = 0.1 -tsr = ops.to_exp(hop, tau) +tsr = ops.to_exp(hop, -tau) sx = SZ(0) sy = SZ(1) From 13f71c64bdaaf961d8a61c2c167bea5d5eed2141 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 26 Jan 2021 15:24:37 -0800 Subject: [PATCH 22/61] reorganize --- quimb/tensor/fermion_2d.py | 314 ++++++++++++++++++------------------ quimb/tensor/fermion_ops.py | 7 +- 2 files changed, 159 insertions(+), 162 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index f943fd9e..bcdb03b5 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -6,6 +6,7 @@ from .tensor_2d import ( TensorNetwork2D, TensorNetwork2DVector, + TensorNetwork2DFlat, PEPS, is_lone_coo, gen_long_range_path, @@ -621,163 +622,6 @@ def reorder_left_row(self, direction="upward", layer_tags=None, inplace=False): direction = "l" + direction[0] return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) - -class FPEPS(FermionTensorNetwork2D, - PEPS): - - - _EXTRA_PROPS = ( - '_site_tag_id', - '_row_tag_id', - '_col_tag_id', - '_Lx', - '_Ly', - '_site_ind_id', - ) - - def __init__(self, arrays, *, shape='urdlp', tags=None, - site_ind_id='k{},{}', site_tag_id='I{},{}', - row_tag_id='ROW{}', col_tag_id='COL{}', - order_iterator=None, **tn_opts): - - if isinstance(arrays, FPEPS): - super().__init__(arrays) - return - - tags = tags_to_oset(tags) - self._site_ind_id = site_ind_id - self._site_tag_id = site_tag_id - self._row_tag_id = row_tag_id - self._col_tag_id = col_tag_id - - arrays = tuple(tuple(x for x in xs) for xs in arrays) - self._Lx = len(arrays) - self._Ly = len(arrays[0]) - tensors = [] - - # cache for both creating and retrieving indices - ix = defaultdict(rand_uuid) - - if order_iterator is None: - order_iterator = product(range(self.Lx), range(self.Ly)) - for i, j in order_iterator: - array = arrays[i][j] - - # figure out if we need to transpose the arrays from some order - # other than up right down left physical - array_order = shape - if i == self.Lx - 1: - array_order = array_order.replace('u', '') - if j == self.Ly - 1: - array_order = array_order.replace('r', '') - if i == 0: - array_order = array_order.replace('d', '') - if j == 0: - array_order = array_order.replace('l', '') - - # allow convention of missing bonds to be singlet dimensions - if array.ndim != len(array_order): - raise ValueError("array shape not matching array order") - - transpose_order = tuple( - array_order.find(x) 
for x in 'urdlp' if x in array_order - ) - - if transpose_order != tuple(range(len(array_order))): - array = array.transpose(transpose_order) - - # get the relevant indices corresponding to neighbours - inds = [] - if 'u' in array_order: - inds.append(ix[(i + 1, j), (i, j)]) - if 'r' in array_order: - inds.append(ix[(i, j), (i, j + 1)]) - if 'd' in array_order: - inds.append(ix[(i, j), (i - 1, j)]) - if 'l' in array_order: - inds.append(ix[(i, j - 1), (i, j)]) - inds.append(self.site_ind(i, j)) - - # mix site, row, column and global tags - - ij_tags = tags | oset((self.site_tag(i, j), - self.row_tag(i), - self.col_tag(j))) - # create the site tensor! - tensors.append(FermionTensor(data=array, inds=inds, tags=ij_tags)) - super().__init__(tensors, check_collisions=False, **tn_opts) - - @classmethod - def rand(cls, Lx, Ly, bond_dim, phys_dim=2, - dtype=float, seed=None, parity=None, - **peps_opts): - """Create a random (un-normalized) PEPS. - - Parameters - ---------- - Lx : int - The number of rows. - Ly : int - The number of columns. - bond_dim : int - The bond dimension. - physical : int, optional - The physical index dimension. - dtype : dtype, optional - The dtype to create the arrays with, default is real double. - seed : int, optional - A random seed. - parity: int or int array of (0,1), optional - parity for each site, default is random parity for all sites - peps_opts - Supplied to :class:`~quimb.tensor.tensor_2d.PEPS`. - - Returns - ------- - psi : PEPS - """ - if seed is not None: - np.random.seed(seed) - - arrays = [[None for _ in range(Ly)] for _ in range(Lx)] - - from pyblock3.algebra.fermion import SparseFermionTensor - from pyblock3.algebra.symmetry import SZ, BondInfo - - if isinstance(parity, np.ndarray): - if not parity.shape != (Lx, Ly): - raise ValueError("parity array shape not matching (Lx, Ly)") - elif isinstance(parity, int): - parity = np.ones((Lx, Ly), dtype=int) * (parity % 2) - elif parity is None: - parity = np.random.randint(0,2,Lx*Ly).reshape(Lx, Ly) - else: - raise TypeError("parity type not recoginized") - - vir_info = BondInfo({SZ(0): bond_dim, SZ(1): bond_dim}) - phy_info = BondInfo({SZ(0): phys_dim, SZ(1): phys_dim}) - - for i, j in product(range(Lx), range(Ly)): - - shape = [] - if i != Lx - 1: # bond up - shape.append(vir_info) - if j != Ly - 1: # bond right - shape.append(vir_info) - if i != 0: # bond down - shape.append(vir_info) - if j != 0: # bond left - shape.append(vir_info) - - shape.append(phy_info) - dq = SZ(parity[i][j]) - - arrays[i][j] = SparseFermionTensor.random(shape, dq=dq, dtype=dtype).to_flat() - - - return cls(arrays, **peps_opts) - - class FermionTensorNetwork2DVector(FermionTensorNetwork2D, FermionTensorNetwork, TensorNetwork2DVector): @@ -1046,6 +890,162 @@ def normalize( ): raise NotImplementedError +class FPEPS(FermionTensorNetwork2DVector, + FermionTensorNetwork2D, + PEPS, + TensorNetwork2DFlat): + + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_site_ind_id', + ) + + def __init__(self, arrays, *, shape='urdlp', tags=None, + site_ind_id='k{},{}', site_tag_id='I{},{}', + row_tag_id='ROW{}', col_tag_id='COL{}', + order_iterator=None, **tn_opts): + + if isinstance(arrays, FPEPS): + super().__init__(arrays) + return + + tags = tags_to_oset(tags) + self._site_ind_id = site_ind_id + self._site_tag_id = site_tag_id + self._row_tag_id = row_tag_id + self._col_tag_id = col_tag_id + + arrays = tuple(tuple(x for x in xs) for xs in arrays) + self._Lx = len(arrays) + self._Ly = len(arrays[0]) + 
tensors = [] + + # cache for both creating and retrieving indices + ix = defaultdict(rand_uuid) + + if order_iterator is None: + order_iterator = product(range(self.Lx), range(self.Ly)) + for i, j in order_iterator: + array = arrays[i][j] + + # figure out if we need to transpose the arrays from some order + # other than up right down left physical + array_order = shape + if i == self.Lx - 1: + array_order = array_order.replace('u', '') + if j == self.Ly - 1: + array_order = array_order.replace('r', '') + if i == 0: + array_order = array_order.replace('d', '') + if j == 0: + array_order = array_order.replace('l', '') + + # allow convention of missing bonds to be singlet dimensions + if array.ndim != len(array_order): + raise ValueError("array shape not matching array order") + + transpose_order = tuple( + array_order.find(x) for x in 'urdlp' if x in array_order + ) + + if transpose_order != tuple(range(len(array_order))): + array = array.transpose(transpose_order) + + # get the relevant indices corresponding to neighbours + inds = [] + if 'u' in array_order: + inds.append(ix[(i + 1, j), (i, j)]) + if 'r' in array_order: + inds.append(ix[(i, j), (i, j + 1)]) + if 'd' in array_order: + inds.append(ix[(i, j), (i - 1, j)]) + if 'l' in array_order: + inds.append(ix[(i, j - 1), (i, j)]) + inds.append(self.site_ind(i, j)) + + # mix site, row, column and global tags + + ij_tags = tags | oset((self.site_tag(i, j), + self.row_tag(i), + self.col_tag(j))) + # create the site tensor! + tensors.append(FermionTensor(data=array, inds=inds, tags=ij_tags)) + super().__init__(tensors, check_collisions=False, **tn_opts) + + @classmethod + def rand(cls, Lx, Ly, bond_dim, phys_dim=2, + dtype=float, seed=None, parity=None, + **peps_opts): + """Create a random (un-normalized) PEPS. + + Parameters + ---------- + Lx : int + The number of rows. + Ly : int + The number of columns. + bond_dim : int + The bond dimension. + physical : int, optional + The physical index dimension. + dtype : dtype, optional + The dtype to create the arrays with, default is real double. + seed : int, optional + A random seed. + parity: int or int array of (0,1), optional + parity for each site, default is random parity for all sites + peps_opts + Supplied to :class:`~quimb.tensor.tensor_2d.PEPS`. 
+ + Returns + ------- + psi : PEPS + """ + if seed is not None: + np.random.seed(seed) + + arrays = [[None for _ in range(Ly)] for _ in range(Lx)] + + from pyblock3.algebra.fermion import SparseFermionTensor + from pyblock3.algebra.symmetry import SZ, BondInfo + + if isinstance(parity, np.ndarray): + if parity.shape != (Lx, Ly): + raise ValueError("parity array shape not matching (Lx, Ly)") + elif isinstance(parity, int): + parity = np.ones((Lx, Ly), dtype=int) * (parity % 2) + elif parity is None: + parity = np.random.randint(0,2,Lx*Ly).reshape(Lx, Ly) + else: + raise TypeError("parity type not recognized") + + vir_info = BondInfo({SZ(0): bond_dim, SZ(1): bond_dim}) + phy_info = BondInfo({SZ(0): phys_dim, SZ(1): phys_dim}) + + for i, j in product(range(Lx), range(Ly)): + + shape = [] + if i != Lx - 1: # bond up + shape.append(vir_info) + if j != Ly - 1: # bond right + shape.append(vir_info) + if i != 0: # bond down + shape.append(vir_info) + if j != 0: # bond left + shape.append(vir_info) + + shape.append(phy_info) + dq = SZ(parity[i][j]) + + arrays[i][j] = SparseFermionTensor.random(shape, dq=dq, dtype=dtype).to_flat() + + return cls(arrays, **peps_opts) + def _gen_site_wfn_tsr(state, ndim=2, ax=0): from pyblock3.algebra.core import SubTensor from pyblock3.algebra.fermion import SparseFermionTensor diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py index ff06259f..ba2a4050 100644 --- a/quimb/tensor/fermion_ops.py +++ b/quimb/tensor/fermion_ops.py @@ -51,11 +51,8 @@ def to_exp(tsr, x): out = np.tensordot(out, v, axes=((-1,),(0,))) return out -def ham_eye(const=1.): - seven = SZ(0) - sodd = SZ(1) - info = BondInfo({seven:2, sodd:2}) - return FlatFermionTensor.eye(info) +def ham_eye(bond_info): + return FlatFermionTensor.eye(bond_info) def gen_h1(h=1.): blocks= [] From 479399bb7499f4ecab737acefcb7f43da2d07487 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 26 Jan 2021 16:42:41 -0800 Subject: [PATCH 23/61] add fermion tensor norm, rename operator identity --- quimb/tensor/fermion.py | 5 +++++ quimb/tensor/fermion_ops.py | 3 +-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 551fc740..44841ebb 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -719,6 +719,11 @@ def fermion_owner(self): def parity(self): return self.data.parity + def norm(self): + """Frobenius norm of this tensor.
+ """ + return np.linalg.norm(self.data.data, 2) + def ind_size(self, dim_or_ind): if isinstance(dim_or_ind, str): if dim_or_ind not in self.inds: diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py index ba2a4050..346a97a6 100644 --- a/quimb/tensor/fermion_ops.py +++ b/quimb/tensor/fermion_ops.py @@ -51,8 +51,7 @@ def to_exp(tsr, x): out = np.tensordot(out, v, axes=((-1,),(0,))) return out -def ham_eye(bond_info): - return FlatFermionTensor.eye(bond_info) +eye = FlatFermionTensor.eye def gen_h1(h=1.): blocks= [] From 32afdbcf2482e46671dea98a7a3bbf830ee26a07 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 27 Jan 2021 13:59:27 -0800 Subject: [PATCH 24/61] add tensor_balance_bonds --- quimb/tensor/fermion.py | 51 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 44841ebb..77037cff 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -295,6 +295,31 @@ def tensor_canonize_bond(T1, T2, absorb='right', **split_opts): fs.move(tid2, site2) return T1, T2 +def tensor_balance_bond(t1, t2, smudge=1e-6): + from pyblock3.algebra.core import SubTensor + from pyblock3.algebra.fermion import SparseFermionTensor + ix, = t1.bonds(t2) + t1H = t1.H.reindex_({ix: ix+'*'}) + t2H = t2.H.reindex_({ix: ix+'*'}) + out = tensor_contract(t1H, t1) + out1 = tensor_contract(t2H, t2) + sblk1 = [] + sblk2 = [] + for iblk1 in out.data.to_sparse(): + for iblk2 in out1.data.to_sparse(): + if iblk1.q_labels != iblk2.q_labels: + continue + x = np.diag(np.asarray(iblk1)) + y = np.diag(np.asarray(iblk2)) + s = (x + smudge) / (y + smudge) + sblk1.append(SubTensor(reduced=np.diag(s**-0.25), q_labels=iblk1.q_labels)) + sblk2.append(SubTensor(reduced=np.diag(s**0.25), q_labels=iblk2.q_labels)) + + s1 = SparseFermionTensor(blocks=sblk1).to_flat() + s2 = SparseFermionTensor(blocks=sblk2).to_flat() + t1.multiply_index_diagonal_(ix, s1, location="back") + t2.multiply_index_diagonal_(ix, s2, location="front") + class FermionSpace: """A labelled, ordered dictionary. The tensor labels point to the tensor and its position inside the fermion space. @@ -980,6 +1005,32 @@ def _reorder_from_tid(self, tid_map, inplace=False): tn.fermion_space._reorder_from_dict(tid_map) return tn + def balance_bonds(self, inplace=False): + """Apply :func:`~quimb.tensor.fermion.tensor_balance_bond` to + all bonds in this tensor network. + + Parameters + ---------- + inplace : bool, optional + Whether to perform the bond balancing inplace or not. 
+ + Returns + ------- + TensorNetwork + """ + tn = self if inplace else self.copy() + + for ix, tids in tn.ind_map.items(): + if len(tids) != 2: + continue + tid1, tid2 = tids + t1, t2 = [tn.tensor_map[x] for x in (tid1, tid2)] + tensor_balance_bond(t1, t2) + + return tn + + balance_bonds_ = functools.partialmethod(balance_bonds, inplace=True) + def assemble_with_tensor(self, tsr): if not is_mergeable(self, tsr): raise ValueError("tensor not same in the fermion space of the tensor network") From 794c74801fccc806ca42b21d83e4bcf8dd0775c7 Mon Sep 17 00:00:00 2001 From: yangcal Date: Thu, 28 Jan 2021 16:35:20 -0800 Subject: [PATCH 25/61] add hubbard module; bugfix --- quimb/tensor/fermion_2d.py | 12 +++++------- quimb/tensor/fermion_ops.py | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index bcdb03b5..b9f5f7bd 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -29,9 +29,7 @@ INVERSE_CUTOFF = 1e-10 -def calc_plaquette_sizes(pairs, autogroup=False): - if autogroup: - raise NotImplementedError +def calc_plaquette_sizes(pairs, autogroup=True): singles = [] remainders = [] for pair in pairs: @@ -801,7 +799,7 @@ def compute_local_expectation( self, terms, normalized=False, - autogroup=False, + autogroup=True, contract_optimize='auto-hq', return_all=False, layer_tags=('KET', 'BRA'), @@ -864,9 +862,9 @@ def compute_local_expectation( TG = FermionTensor(G.copy(), inds=site_ix+bnds, left_inds=site_ix) ntsr = len(newtn.tensor_map) fs = newtn.fermion_space - ng = len(where) - for i in range(ntsr-2*ng, ntsr): - tsr = fs[i][2] + tids = newtn._get_tids_from_inds(site_ix, which='any') + for tid_ in tids: + tsr = newtn.tensor_map[tid_] if layer_tags[0] in tsr.tags: tsr.reindex_(reindex_map) newtn.add_tensor(TG, virtual=True) diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py index 346a97a6..eb9ace5b 100644 --- a/quimb/tensor/fermion_ops.py +++ b/quimb/tensor/fermion_ops.py @@ -85,6 +85,25 @@ def onsite_u(u=1): umat = SparseFermionTensor(blocks=blocks).to_flat() return umat +def hubbard(t, u, fac=None): + if fac is None: + fac = (1, 1) + faca, facb = fac + ham = hopping(t).to_sparse() + for iblk in ham: + qin, qout = iblk.q_labels[:2], iblk.q_labels[2:] + if qin != qout: continue + in_pair = [iq.n for iq in qin] + if in_pair == [0,0]: + iblk[1,0,1,0] += faca * u + iblk[0,1,0,1] += facb * u + iblk[1,1,1,1] += (faca + facb) * u + elif in_pair == [0,1]: + iblk[1,:,1,:] += faca * u * np.eye(2) + elif in_pair == [1,0]: + iblk[:,1,:,1] += facb * u * np.eye(2) + return ham.to_flat() + def count_n(): nmat0 = np.zeros([2,2]) nmat0[1,1] = 2 From 1a1d032f41ed49b7b4e9ed347c6a76f3be24336a Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 29 Jan 2021 16:19:22 -0800 Subject: [PATCH 26/61] fermion_2d_tebd added --- quimb/tensor/fermion_2d_tebd.py | 356 ++++++++++++++++++++++++++++++++ 1 file changed, 356 insertions(+) create mode 100644 quimb/tensor/fermion_2d_tebd.py diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py new file mode 100644 index 00000000..1df9780c --- /dev/null +++ b/quimb/tensor/fermion_2d_tebd.py @@ -0,0 +1,356 @@ +import numpy as np +import random +import collections +from itertools import product +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor +from quimb.tensor.fermion_2d import FPEPS,FermionTensorNetwork2DVector +from quimb.tensor.fermion_ops import to_exp, eye, 
hubbard +from pyblock3.algebra.symmetry import SZ, BondInfo +from quimb.tensor.tensor_2d_tebd import SimpleUpdate as _SimpleUpdate +from quimb.tensor.tensor_2d_tebd import conditioner +from quimb.utils import pairwise +from quimb.tensor.tensor_2d import (gen_long_range_path, + nearest_neighbors) + +SMALL_VAL = 1e-10 + +def Hubbard2D(t, u, Lx, Ly): + ham = dict() + count_neighbour = lambda i,j: (i>0) + (i0) + (jSMALL_VAL] += self.gauge_smudge + Tij.multiply_index_diagonal_( + ind=bond_ind, x=mult_val, location=location) + + # absorb the inner bond gauges equally into both sites along string + for site_a, site_b in pairwise(string): + Ta, Tb = self._psi[site_a], self._psi[site_b] + if (site_a, site_b) in self.gauges: + Tsval = self.gauges[(site_a, site_b)] + loca, locb = ("back", "front") + elif (site_b, site_a) in self.gauges: + Tsval = self.gauges[(site_b, site_a)] + loca, locb = ("front", "back") + else: + raise KeyError("gauge not found") + + mult_val = Tsval.copy() + mult_val.data = Tsval.data ** .5 + bnd = self._psi.bond(site_a, site_b) + Ta.multiply_index_diagonal_(ind=bnd, x=mult_val, location=loca) + Tb.multiply_index_diagonal_(ind=bnd, x=mult_val, location=locb) + + # perform the gate, retrieving new bond singular values + info = dict() + self._psi.gate_(U, where, absorb=None, info=info, + long_range_path_sequence=path, **self.gate_opts) + + # set the new singualar values all along the chain + for site_a, site_b in pairwise(string): + if ('singular_values', (site_a, site_b)) in info: + bond_pair = (site_a, site_b) + else: + bond_pair = (site_b, site_a) + s = info['singular_values', bond_pair] + if self.gauge_renorm: + # keep the singular values from blowing up + s = s / np.sum(s.data**2) ** 0.5 + + if bond_pair not in self.gauges: + del self.gauges[(bond_pair[1], bond_pair[0])] + + self.gauges[bond_pair] = s + + # absorb the 'outer' gauges from these neighbours + for site in string: + Tij = self._psi[site] + for neighbour in neighbours[site]: + if (site, neighbour) in self.gauges: + Tsval = self.gauges[(site, neighbour)] + location = "back" + elif (neighbour, site) in self.gauges: + Tsval = self.gauges[(neighbour, site)] + location = "front" + else: + raise KeyError("gauge not found") + bnd = self._psi.bond(site, neighbour) + mult_val = Tsval.copy() + non_zero_ind = abs(mult_val.data)>SMALL_VAL + mult_val.data[non_zero_ind] = (mult_val.data[non_zero_ind] + self.gauge_smudge) ** -1 + Tij.multiply_index_diagonal_( + ind=bnd, x=mult_val, location=location) + + def get_state(self, absorb_gauges=True): + """Return the state, with the diagonal bond gauges either absorbed + equally into the tensors on either side of them + (``absorb_gauges=True``, the default), or left lazily represented in + the tensor network with hyperedges (``absorb_gauges=False``). 
+ """ + psi = self._psi.copy() + + if not absorb_gauges: + raise NotImplementedError + else: + for (ija, ijb), Tsval in self.gauges.items(): + bnd = psi.bond(ija, ijb) + Ta = psi[ija] + Tb = psi[ijb] + mult_val = Tsval.copy() + mult_val.data = Tsval.data ** .5 + Ta.multiply_index_diagonal_(bnd, mult_val, location='back') + Tb.multiply_index_diagonal_(bnd, mult_val, location='front') + + if self.condition_tensors: + conditioner(psi, balance_bonds=self.condition_balance_bonds) + + return psi From 7184740ea8a4f00d07ff75feefd575ba04e2d3ed Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 2 Feb 2021 10:39:39 -0800 Subject: [PATCH 27/61] rescale random fermion tensors --- quimb/tensor/fermion_2d.py | 4 +- quimb/tensor/test/test_2d.py | 170 ----------------------------- quimb/tensor/test/test_contract.py | 76 ------------- 3 files changed, 3 insertions(+), 247 deletions(-) delete mode 100644 quimb/tensor/test/test_2d.py delete mode 100644 quimb/tensor/test/test_contract.py diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index b9f5f7bd..6ccfe868 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -1040,7 +1040,9 @@ def rand(cls, Lx, Ly, bond_dim, phys_dim=2, shape.append(phy_info) dq = SZ(parity[i][j]) - arrays[i][j] = SparseFermionTensor.random(shape, dq=dq, dtype=dtype).to_flat() + tsr = SparseFermionTensor.random(shape, dq=dq, dtype=dtype).to_flat() + tsr.data /= np.linalg.norm(tsr.data, 2) **(1.5 / tsr.ndim) + arrays[i][j] = tsr return cls(arrays, **peps_opts) diff --git a/quimb/tensor/test/test_2d.py b/quimb/tensor/test/test_2d.py deleted file mode 100644 index bc344e8c..00000000 --- a/quimb/tensor/test/test_2d.py +++ /dev/null @@ -1,170 +0,0 @@ -import quimb as qu -import quimb.tensor as qtn -from quimb.tensor.tensor_2d import PEPS -import numpy as np -from quimb.tensor.fermion_2d import FPEPS -from quimb.tensor.fermion import _fetch_fermion_space -from quimb.tensor.tensor_core import oset -Lx = 2 -Ly = 3 -D = 4 -np.random.seed(3) - -def tensor_compress_bond( - T1, - T2, - reduced=True, - absorb='both', - info=None, - **compress_opts -): - fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) - - site1, site2 = fs[tid1][1], fs[tid2][1] - - if site1 < site2: - Tl, Tr = T1, T2 - tidl, tidr = tid1, tid2 - else: - Tl, Tr = T2, T1 - tidl, tidr = tid2, tid1 - - left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] - right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] - - out = fs._contract_pairs(tidl, tidr, direction="left") - l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) - return l, r - -def get_err(max_bond=None): - if max_bond is None: max_bond = 2*D**2 - - - psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) - tsr1 = psi[0,0] - tsr2 = psi[1,0] - - for x in range(Lx): - psi.contract_between((0,x), (1,x)) - tsr1 = psi[0,0] - tsr2 = psi[0,1] - - - inds_contr = [i for i in tsr1.inds if i in tsr2.inds] - outinds = [i for i in tsr1.inds if i not in tsr2.inds] - idxa = [tsr1.inds.index(i) for i in inds_contr] - idxb = [tsr2.inds.index(i) for i in inds_contr] - - out = np.tensordot(tsr1.data, tsr2.data, axes=(idxa, idxb)) - - l, r = tensor_compress_bond(tsr1, tsr2, max_bond=max_bond) - - inds_contr = [i for i in l.inds if i in r.inds] - outinds = [i for i in l.inds if i not in r.inds] - idxa = [l.inds.index(i) for i in inds_contr] - idxb = [r.inds.index(i) for i in inds_contr] - fidx = [i for i in l.inds+r.inds if i not in inds_contr] - - out1 = np.tensordot(l.data, r.data, 
axes=(idxa, idxb)) - - - nblk = out.shapes.shape[0] - - err = [] - for i in range(nblk): - dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) - j = np.where(dlt==0)[0][0] - ist, ied = out.idxs[i], out.idxs[i+1] - jst, jed = out1.idxs[j], out1.idxs[j+1] - err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) - return max(err) - - - - -psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) - -ket = psi.copy() - -layer_tags=('KET', 'BRA') - -ket.add_tag(layer_tags[0]) - - -bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) -bra.mangle_inner_("*") - -norm = bra & ket - -def contract_all(tn): - Lx, Ly = tn._Lx, tn._Ly - for i in range(Lx): - for j in range(Ly): - x1, x2 = tn[i,j] - tn.contract_between(x1.tags, x2.tags) - for i in range(Lx): - for j in range(Ly-1): - x1 = tn[i,j] - x2 = tn[i,j+1] - tn.contract_between(x1.tags, x2.tags) - for i in range(Lx-1): - x1 = tn[i,0] - x2 = tn[i+1,0] - out = tn.contract_between(x1.tags, x2.tags) - return out - -def contract_left(tn): - Lx, Ly = tn._Lx, tn._Ly - for i in range(Lx): - for j in range(Ly): - x1, x2 = tn[i,j] - tn.contract_between(x1.tags, x2.tags) - for j in range(Ly): - for i in range(Lx-1): - x1 = tn[i,j] - x2 = tn[i+1,j] - tn.contract_between(x1.tags, x2.tags) - for i in range(Ly-1): - x1 = tn[0,i] - x2 = tn[0,i+1] - out = tn.contract_between(x1.tags, x2.tags) - return out - - -fs = norm.fermion_space -norm1 = norm.copy() - -size = Lx * Ly -for i in range(size): - norm1.fermion_space.move(2*size-1, 2*i+1) - -out1 = contract_all(norm1) - -tag1 = norm.site_tag(0, 0)#, self.site_tag(i + 1, j) -tag2 = norm.site_tag(0, 1) - -out2 = contract_left(norm) - -print(out1, out2) -#print(hash(x[0]), hash(x[1])) -#norm.contract_boundary() - -#x1, x2 = norm[0,0] -#norm.contract_between(x1.tags, x2.tags) - -#tid1, = norm._get_tids_from_tags(x1.tags, which='all') -#print(tid1) -#tid2, = norm._get_tids_from_tags(x2.tags, which='all') -#print(tid2) -#norm.contract_between(tagged_tids[0], tagged_tids[1]) -#x = norm[0,0] -#print(x[0].fermion_owner[2],x[1].fermion_owner[2])#, type(x[1])) - -#print(tid1) -#print(tid2) -#print(hash(bra[0,0]), hash(ket[0,0])) -exit() -for x in range(Lx-1): - for y in range(Ly-1): - out = psi.contract_between((0,x), (1,x)) - print(x, y, "done") diff --git a/quimb/tensor/test/test_contract.py b/quimb/tensor/test/test_contract.py deleted file mode 100644 index 924060e2..00000000 --- a/quimb/tensor/test/test_contract.py +++ /dev/null @@ -1,76 +0,0 @@ -import unittest -import numpy as np -from pyblock3.algebra.symmetry import SZ, BondInfo -from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor -from quimb.tensor import fermion -import copy -import quimb as qu - -np.random.seed(3) -x = SZ(0,0,0) -y = SZ(1,0,0) -infox = BondInfo({x:3, y: 2}) - -infoy = BondInfo({x:5, y: 5}) - - -asp = SparseFermionTensor.random((infoy,infox,infox), dq=y) -abc = FlatFermionTensor.from_sparse(asp) - -bsp = SparseFermionTensor.random((infox,infox,infox), dq=y) -bcd = FlatFermionTensor.from_sparse(bsp) - -csp = SparseFermionTensor.random((infox,infox,infoy), dq=y) -efa = FlatFermionTensor.from_sparse(csp) - -dsp = SparseFermionTensor.random((infox,infox,infox), dq=y) -def_ = FlatFermionTensor.from_sparse(dsp) - - -def finger(x): - dat = x.data.data - return (dat*np.sin(dat.size)).sum() - -bcef = np.tensordot(abc, efa, axes=[(0,),(2,)]) -efd = np.tensordot(bcef, bcd, axes=[(0,1),(0,1)]) -dat = np.tensordot(efd, def_, axes=[(0,1,2),(1,2,0)]) - -bcef2 = np.tensordot(bcd, def_, axes=[(2,),(0,)]) -dat1 = np.tensordot(bcef, bcef2, 
axes=[(0,1,2,3),(0,1,2,3)]) - -x = fermion.FermionTensor(abc, inds=['a','b','c'], tags=["x"]) -y = fermion.FermionTensor(efa, inds=['e','f','a'], tags=["y"]) -z = fermion.FermionTensor(bcd, inds=['b','c','d'], tags=["z"]) -w = fermion.FermionTensor(def_, inds=['d','e','f'], tags=["w"]) - -tn = fermion.FermionTensorNetwork((x, y, z, w)) - -tn1 = tn.copy() -tn2 = tn.copy() - -tn.contract_between(["x"], ["y"]) -tn.contract_between(["x", "y"], ["w"]) -out = tn.contract_between(["x", "y", "w"], ["z"]) - -print(dat.data[0], dat1.data[0], out) - -tn1.contract_between(["x"], ["z"]) -tn1.contract_between(["y"], ["w"]) -out = tn1.contract_between(["x", "z"], ["y","w"]) -print(dat.data[0], dat1.data[0], out) - - -tids = tn2._get_tids_from_inds(["b","c"]) -tn2.contract_ind(["b","c"]) -tn2.contract_ind(["a"]) -out = tn2.contract_ind(["f"]) -print(dat.data[0], dat1.data[0], out) - - -fs = fermion.FermionSpace() -fs.add_tensor(x, virtual=True) -fs.add_tensor(y, virtual=True) -fs.add_tensor(z, virtual=True) -fs.add_tensor(w, virtual=True) -out = fermion.tensor_contract(w, y, z, x, inplace=True, direction="right") -print(dat.data[0], dat1.data[0], out) From 77567cfe8b8179a73586a13731da1918f5a5ec86 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 2 Feb 2021 10:51:16 -0800 Subject: [PATCH 28/61] first syn --- quimb/tensor/fermion.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 77037cff..681d07b1 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -707,7 +707,7 @@ class FermionTensor(Tensor): def __init__(self, data=1.0, inds=(), tags=None, left_inds=None, fermion_owner=None): # a new or copied Tensor always has no owners - self.owners = dict() + self._owners = dict() # Short circuit for copying Tensors if isinstance(data, self.__class__): @@ -975,6 +975,7 @@ def __init__(self, ts, *, virtual=False, check_collisions=True): self.tensor_map[tid].add_owner(self, tid) for ep in ts.__class__._EXTRA_PROPS: setattr(self, ep, getattr(ts, ep)) + self.exponent = ts.exponent return # internal structure @@ -986,6 +987,7 @@ def __init__(self, ts, *, virtual=False, check_collisions=True): for t in ts: self.add(t, virtual=virtual, check_collisions=check_collisions) self._inner_inds = None + self.exponent = 0.0 def __and__(self, other): """Combine this tensor network with more tensors, without contracting. 
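The ad-hoc driver scripts deleted in the surrounding commits exercised the new expectation-value machinery by hand, and the pytest suite added later in the series ([PATCH 31/61]) covers the same ground more systematically. For quick reference, the following is a minimal usage sketch of that API as it stands at this point in the series. It is not part of any patch: the lattice size, bond dimension, seed and interaction strength are illustrative values only, and it assumes an installation of quimb with these fermion modules together with pyblock3.

from quimb.tensor.fermion_2d import FPEPS
from quimb.tensor import fermion_ops as ops

# illustrative parameters, not taken from the patches
Lx = Ly = 3
psi = FPEPS.rand(Lx, Ly, bond_dim=2, seed=666)  # random block parities by default

# one on-site Hubbard-U term per site, built from the fermion operator module
terms = {(i, j): ops.onsite_u(4.0) for i in range(Lx) for j in range(Ly)}

# with return_all=True each value is an (expectation, plaquette norm) pair;
# normalized=True also computes the norms, so e / n is <psi|U_ij|psi> / <psi|psi>
exps = psi.compute_local_expectation(terms, normalized=True, return_all=True)
for where, (e, n) in exps.items():
    print(where, e / n)

Two-site terms follow the same pattern, for example ops.hopping(t) keyed by nearest-neighbour coordinate pairs as in the deleted test_env_ops.py script.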
From dd2d8cb8bdfd6432881c7db47895bb2fd2ae5964 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 2 Feb 2021 15:21:44 -0800 Subject: [PATCH 29/61] rename to_exponential --- quimb/tensor/fermion_ops.py | 2 +- quimb/tensor/test/test_env.py | 147 ------------------------- quimb/tensor/test/test_env_ops.py | 174 ------------------------------ quimb/tensor/test/test_exp.py | 132 ----------------------- quimb/tensor/test/test_gate_2d.py | 64 ----------- quimb/tensor/test/test_norm.py | 163 ---------------------------- quimb/tensor/test/test_norm_2d.py | 72 ------------- quimb/tensor/test/test_ops.py | 68 ------------ 8 files changed, 1 insertion(+), 821 deletions(-) delete mode 100644 quimb/tensor/test/test_env.py delete mode 100644 quimb/tensor/test/test_env_ops.py delete mode 100644 quimb/tensor/test/test_exp.py delete mode 100644 quimb/tensor/test/test_gate_2d.py delete mode 100644 quimb/tensor/test/test_norm.py delete mode 100644 quimb/tensor/test/test_norm_2d.py delete mode 100644 quimb/tensor/test/test_ops.py diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py index eb9ace5b..e2268116 100644 --- a/quimb/tensor/fermion_ops.py +++ b/quimb/tensor/fermion_ops.py @@ -5,7 +5,7 @@ from pyblock3.algebra.symmetry import SZ, BondInfo from .fermion_2d import FPEPS,FermionTensorNetwork2DVector -def to_exp(tsr, x): +def to_exponential(tsr, x): ndim = tsr.ndim if tsr.parity == 1: raise ValueError("expontial of odd parity tensor not defined") diff --git a/quimb/tensor/test/test_env.py b/quimb/tensor/test/test_env.py deleted file mode 100644 index f9b9428b..00000000 --- a/quimb/tensor/test/test_env.py +++ /dev/null @@ -1,147 +0,0 @@ -import quimb as qu -import quimb.tensor as qtn -from quimb.tensor.tensor_2d import PEPS -import numpy as np -from quimb.tensor.fermion_2d import FPEPS -from quimb.tensor.fermion import _fetch_fermion_space, FermionTensorNetwork -from quimb.tensor.tensor_core import oset -from itertools import product -Lx = 3 -Ly = 3 -D = 2 -np.random.seed(3) - -def tensor_compress_bond( - T1, - T2, - reduced=True, - absorb='both', - info=None, - **compress_opts -): - fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) - - site1, site2 = fs[tid1][1], fs[tid2][1] - - if site1 < site2: - Tl, Tr = T1, T2 - tidl, tidr = tid1, tid2 - else: - Tl, Tr = T2, T1 - tidl, tidr = tid2, tid1 - - left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] - right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] - - out = fs._contract_pairs(tidl, tidr, direction="left") - l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) - return l, r - -def get_err(max_bond=None): - if max_bond is None: max_bond = 2*D**2 - - - psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) - tsr1 = psi[0,0] - tsr2 = psi[1,0] - - for x in range(Lx): - psi.contract_between((0,x), (1,x)) - tsr1 = psi[0,0] - tsr2 = psi[0,1] - - - inds_contr = [i for i in tsr1.inds if i in tsr2.inds] - outinds = [i for i in tsr1.inds if i not in tsr2.inds] - idxa = [tsr1.inds.index(i) for i in inds_contr] - idxb = [tsr2.inds.index(i) for i in inds_contr] - - out = np.tensordot(tsr1.data, tsr2.data, axes=(idxa, idxb)) - - l, r = tensor_compress_bond(tsr1, tsr2, max_bond=max_bond) - - inds_contr = [i for i in l.inds if i in r.inds] - outinds = [i for i in l.inds if i not in r.inds] - idxa = [l.inds.index(i) for i in inds_contr] - idxb = [r.inds.index(i) for i in inds_contr] - fidx = [i for i in l.inds+r.inds if i not in inds_contr] - - out1 = np.tensordot(l.data, r.data, 
axes=(idxa, idxb)) - - - nblk = out.shapes.shape[0] - - err = [] - for i in range(nblk): - dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) - j = np.where(dlt==0)[0][0] - ist, ied = out.idxs[i], out.idxs[i+1] - jst, jed = out1.idxs[j], out1.idxs[j+1] - err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) - return max(err) - -def contract_all(tn): - Lx, Ly = tn._Lx, tn._Ly - nsite = Lx * Ly * 2 - fs = tn.fermion_space - for x in range(nsite-1): - out = fs._contract_pairs(0, 1) - return out - - - - -psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) - -ket = psi.copy() - -layer_tags=('KET', 'BRA') - -ket.add_tag(layer_tags[0]) - - -bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) -bra.mangle_inner_("*") -norm = bra & ket - -norm_ur = norm.reorder_upward_column(layer_tags=layer_tags) -out = contract_all(norm_ur) -norm_dl = norm.reorder_downward_column(direction="left", layer_tags=layer_tags) -norm_rd = norm.reorder_right_row(direction="down",layer_tags=layer_tags) -norm_lu = norm.reorder_left_row(direction="up",layer_tags=layer_tags) - - - - -row_envs = norm.compute_row_environments(layer_tags=layer_tags) -print("TESTING ROW ENVIRONMENTS") -for ix in range(Lx): - tmp = row_envs["below", ix].copy() - tmp.add_tensor_network(row_envs["mid", ix]) - tmp.add_tensor_network(row_envs["above", ix]) - fs = tmp.fermion_space - for i in range(len(fs.tensor_order.keys())-1): - out = fs._contract_pairs(0,1) - print("ROW%i env + mid: %.6f"%(ix, out)) - -col_envs = norm.compute_col_environments(layer_tags=layer_tags) -print("TESTING COL ENVIRONMENTS") -for ix in range(Ly): - tmp = col_envs["left", ix].copy() - tmp.add_tensor_network(col_envs["mid", ix]) - tmp.add_tensor_network(col_envs["right", ix]) - fs = tmp.fermion_space - for i in range(len(fs.tensor_order.keys())-1): - out = fs._contract_pairs(0,1) - print("COL%i env + mid: %.6f"%(ix, out)) - -out = contract_all(norm) -print(out) -out_ur = contract_all(norm_ur) -print(out_ur) -out_dl = contract_all(norm_dl) -print(out_dl) -out_rd = contract_all(norm_rd) -print(out_rd) -out_lu = contract_all(norm_lu) -print(out_lu) diff --git a/quimb/tensor/test/test_env_ops.py b/quimb/tensor/test/test_env_ops.py deleted file mode 100644 index f7c24214..00000000 --- a/quimb/tensor/test/test_env_ops.py +++ /dev/null @@ -1,174 +0,0 @@ -import quimb as qu -import numpy as np -from quimb.tensor.tensor_core import rand_uuid -from quimb.tensor.tensor_2d import is_lone_coo -from quimb.tensor.fermion_2d import FermionTensorNetwork2DVector, gen_mf_peps, FPEPS -from quimb.tensor.fermion import FermionTensorNetwork, FermionTensor, tensor_contract, FermionSpace -from quimb.tensor import fermion_ops as ops -import itertools - -def compute_env(psi, max_bond, bra=None, x_bsz=1, y_bsz=1): - ket = psi.copy() - layer_tags=('KET', 'BRA') - - ket.add_tag(layer_tags[0]) - if bra is None: bra = ket.H - bra = bra.retag_({layer_tags[0]: layer_tags[1]}) - bra.mangle_inner_("*") - - norm = ket & bra - envs = norm._compute_plaquette_environments_row_first(x_bsz=x_bsz, y_bsz=y_bsz, layer_tags=layer_tags, max_bond=max_bond) - return envs - -def compute_expectation(env, psi, op, where, max_bond): - ket = psi.copy() - layer_tags=('KET', 'BRA') - - ket.add_tag(layer_tags[0]) - bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) - bra.mangle_inner_("*") - - if is_lone_coo(where): - where = (where,) - else: - where = tuple(where) - - ng = len(where) - site_ix = [bra.site_ind(i, j) for i, j in where] - bnds = [rand_uuid() for _ in range(ng)] - reindex_map = dict(zip(site_ix, bnds)) - TG = 
FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) - newTG = bra.fermion_space.move_past(TG, (0, len(bra.fermion_space.tensor_order))) - if ng==1: - id = where + ((1,1),) - else: - x_bsz = abs(where[1][0] - where[0][0]) + 1 - y_bsz = abs(where[1][1] - where[0][1]) + 1 - id = (where[0], ) + ((x_bsz, y_bsz),) - if id not in env.keys(): - id = (where[1], ) + ((x_bsz, y_bsz),) - if id not in env.keys(): - raise KeyError("env does not fit with operator") - tn = env[id].copy() - fs = tn.fermion_space - ntsr = len(fs.tensor_order) - for i in range(ntsr-2*ng, ntsr): - tsr = fs[i][2] - if layer_tags[0] in tsr.tags: - tsr.reindex_(reindex_map) - tn.add_tensor(newTG, virtual=True) - out = tn.contract(all, optimize='auto-hq') - return out - -def contract_raw(psi, op, where): - ket = psi.copy() - layer_tags=('KET', 'BRA') - - ket.add_tag(layer_tags[0]) - bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) - bra.mangle_inner_("*") - - if is_lone_coo(where): - where = (where,) - else: - where = tuple(where) - - ng = len(where) - site_ix = [bra.site_ind(i, j) for i, j in where] - bnds = [rand_uuid() for _ in range(ng)] - reindex_map = dict(zip(site_ix, bnds)) - TG = FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) - ket.reindex_(reindex_map) - tn = ket & TG & bra - out = tn.contract(all, optimize='auto-hq') - return out - -def contract_gate(psi, op, where): - ket = psi.copy() - layer_tags=('KET', 'BRA') - - ket.add_tag(layer_tags[0]) - bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) - bra.mangle_inner_("*") - newket = ket.gate(op, where) - tn = newket & bra - out = tn.contract(all, optimize='auto-hq') - return out - -Lx = Ly = 4 -max_bond = 8 -state_array = np.random.randint(0,4, [Lx, Ly]) -psi = gen_mf_peps(state_array) -psi.view_as_(FermionTensorNetwork2DVector, like=psi) - -U = 4. -t = 2. 
-uop = ops.onsite_u(U) -nop = ops.count_n() -sz = ops.measure_sz() -hop = ops.hopping(t) - -print("testing U") -terms = dict() -for i in range(Lx): - for j in range(Ly): - terms[(i,j)] = uop -exps = psi.compute_local_expectation(terms, return_all=True) - -for ix in range(Lx): - for iy in range(Ly): - where = (ix, iy) - out = exps[where][0] - if state_array[ix,iy]==3: - print(U==out) - else: - print(0.==out) - -print("testing N") -terms = dict() -for i in range(Lx): - for j in range(Ly): - terms[(i,j)] = nop -exps = psi.compute_local_expectation(terms, return_all=True) - -for ix in range(Lx): - for iy in range(Ly): - where = (ix, iy) - out = exps[where][0] - if state_array[ix,iy] ==0: - print(0.==out) - elif state_array[ix, iy] in [1, 2]: - print(1.==out) - else: - print(2.==out) - -print("testing Sz") -terms = dict() -for i in range(Lx): - for j in range(Ly): - terms[(i,j)] = sz -exps = psi.compute_local_expectation(terms, return_all=True) - -for ix in range(Lx): - for iy in range(Ly): - where = (ix, iy) - out = exps[where][0] - if state_array[ix,iy] in [0,3]: - print(0.==out) - elif state_array[ix, iy] ==1: - print(.5==out) - else: - print(-.5==out) - -print("testing hopping") - -Lx = Ly = 3 -psi = FPEPS.rand(Lx, Ly, 1, seed=33) - -psi.view_as_(FermionTensorNetwork2DVector, like=psi) -where = ((1,1),(1,2)) -out = contract_raw(psi, hop, where) -out1 = contract_gate(psi, hop, where) -env = compute_env(psi, max_bond, y_bsz=2) -out2 = compute_expectation(env, psi, hop, where, max_bond) -print(out, out1, out2) diff --git a/quimb/tensor/test/test_exp.py b/quimb/tensor/test/test_exp.py deleted file mode 100644 index 4004bf1b..00000000 --- a/quimb/tensor/test/test_exp.py +++ /dev/null @@ -1,132 +0,0 @@ -import numpy as np -from quimb.tensor import fermion_ops as ops -from pyblock3.algebra.fermion import _pack_flat_tensor, SparseFermionTensor, _unpack_flat_tensor, FlatFermionTensor -from pyblock3.algebra.core import SubTensor -from pyblock3.algebra.symmetry import SZ - -t = 2 -hop = ops.hopping(t) - -def get_state(out): - vecmap = {(SZ(0), 0): "0,", - (SZ(0), 1): "-+,", - (SZ(1), 0): "+,", - (SZ(1), 1): "-,"} - outstring = "" - coeff = 0 - for iblk in out.blocks: - data = np.asarray(iblk) - inds = np.where(abs(data)>0.) 
- for ia, ib in zip(*inds): - key1 = (iblk.q_labels[0], ia) - key2 = (iblk.q_labels[1], ib) - val = data[ia, ib] - outstring += "+ %.4f|"%(val) + vecmap[key1] + vecmap[key2].replace(',','> ') - if vecmap[key1]+vecmap[key2] == "+,-,": - coeff = val - - if outstring=="": - outstring= "|0>" - return outstring - - -def get_err(out, out1): - nblk = len(out.q_labels) - err = [] - for i in range(nblk): - dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) - j = np.where(dlt==0)[0][0] - ist, ied = out.idxs[i], out.idxs[i+1] - jst, jed = out1.idxs[j], out1.idxs[j+1] - err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) - return max(err) - -tau = 0.1 -tsr = ops.to_exp(hop, -tau) - -sx = SZ(0) -sy = SZ(1) - -blocks=[] -states = np.zeros([2,2]) -states[0,0] = 2**(-.5) -blocks.append(SubTensor(reduced=states, q_labels=(sx, sy))) -blocks.append(SubTensor(reduced=-states, q_labels=(sy, sx))) -# 2**.5 |0+> -2**.5|+0>, eigenstate of hopping(t) - -eval = t -instate = SparseFermionTensor(blocks=blocks) -instring = get_state(instate) -print("Input: ", instring) -outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() -outstate = np.tensordot(tsr, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() -outstring0 = get_state(outstate0) -outstring = get_state(outstate) -print("Output0:", outstring0) -print("Output:", outstring) -print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*2**(-.5))) - -eval = -t -blocks=[] -states = np.zeros([2,2]) -states[0] = .5 -blocks.append(SubTensor(reduced=states, q_labels=(sx, sy))) #0+, 0- -blocks.append(SubTensor(reduced=states.T, q_labels=(sy, sx))) #+0, -0, eigenstate of hopping - -# .5 |0+> + .5 |0-> + .5 |+0> + .5 |-0>, eigenstate of hopping(-t) - -instate = SparseFermionTensor(blocks=blocks) -instring = get_state(instate) -print("Input: ", instring) -outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() -outstate = np.tensordot(tsr, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() -outstring0 = get_state(outstate0) -outstring = get_state(outstate) -print("Output0:", outstring0) -print("Output:", outstring) -print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*(.5))) - - - - -eval = -2*t -blocks=[] -states = np.zeros([2,2]) -states[1,0] = states[0,1] = .5 -blocks.append(SubTensor(reduced=states, q_labels=(sx, sx))) -states = np.zeros([2,2]) -states[1,0] = .5 -states[0,1] =-.5 -blocks.append(SubTensor(reduced=states, q_labels=(sy, sy))) -instate = SparseFermionTensor(blocks=blocks) -# .5 |0,-+> + .5 |-+,0> + .5 |-,+> - .5|+,->, eigenstate (-2) -instring = get_state(instate) -print("Input: ", instring) -outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() -outstate = np.tensordot(tsr, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() -outstring0 = get_state(outstate0) -outstring = get_state(outstate) -print("Output0:", outstring0) -print("Output:", outstring) -print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*(.5))) - -eval= 2*t -blocks=[] -states = np.zeros([2,2]) -states[1,0] = states[0,1] = .5 -blocks.append(SubTensor(reduced=states, q_labels=(sx, sx))) -states = np.zeros([2,2]) -states[1,0] =-.5 -states[0,1] =.5 -blocks.append(SubTensor(reduced=states, q_labels=(sy, sy))) -instate = SparseFermionTensor(blocks=blocks) -# .5 |0,-+> + .5 |-+,0> - .5 |-,+> + .5|+,->, eigenstate (2) -instring = get_state(instate) -print("Input: ", instring) -outstate0 = np.tensordot(hop, instate.to_flat(), axes=((2,3),(0,1))).to_sparse() -outstate = np.tensordot(tsr, instate.to_flat(), 
axes=((2,3),(0,1))).to_sparse() -outstring0 = get_state(outstate0) -outstring = get_state(outstate) -print("Output0:", outstring0) -print("Output:", outstring) -print("expected coeff: %.4f\n"%(np.e**(eval*-tau)*(.5))) diff --git a/quimb/tensor/test/test_gate_2d.py b/quimb/tensor/test/test_gate_2d.py deleted file mode 100644 index b1a5196c..00000000 --- a/quimb/tensor/test/test_gate_2d.py +++ /dev/null @@ -1,64 +0,0 @@ -import numpy as np -from quimb.tensor.fermion_2d import FPEPS, FermionTensorNetwork2DVector -from pyblock3.algebra.symmetry import SZ, BondInfo -from pyblock3.algebra.fermion import (SparseFermionTensor, - FlatFermionTensor) -import time - -def compute_norm(psi, max_bond): - ket = psi.copy() - layer_tags=('KET', 'BRA') - - ket.add_tag(layer_tags[0]) - - bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) - bra.mangle_inner_("*") - - norm = bra & ket - - envs = norm._compute_plaquette_environments_col_first(x_bsz=1, y_bsz=1, layer_tags=layer_tags, max_bond=max_bond) - for key, val in envs.items(): - fs = val.fermion_space - ntsr = len(val.tensor_map) - for i in range(ntsr-1): - out = fs._contract_pairs(0,1) - print("Col:", key, out) - - envs = norm._compute_plaquette_environments_row_first(x_bsz=1, y_bsz=1, layer_tags=layer_tags, max_bond=max_bond) - for key, val in envs.items(): - fs = val.fermion_space - ntsr = len(val.tensor_map) - for i in range(ntsr-1): - out = fs._contract_pairs(0,1) - print("Row:",key, out) - -Lx = Ly = 4 -D = 2 - -np.random.seed(3) -infox = BondInfo({SZ(0,0,0):2, SZ(1,0,0): 2}) -G = SparseFermionTensor.random((infox,infox,infox,infox), dq=SZ(1)).to_flat() -TG = FlatFermionTensor.eye(infox) -psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) -psi.view_as_(FermionTensorNetwork2DVector, like=psi) - -max_bond=None -cutoff = 1e-10 - -site = ((0,0), (0,1)) - -t0 = time.time() -psi0 = psi.gate_(TG,((1,1)), contract=True, absorb=None, max_bond=max_bond, info=dict()) -t1 = time.time() -psi1 = psi.gate(G, site, contract="split", absorb=None, max_bond=max_bond, cutoff=cutoff, info=dict()) -t2 = time.time() -psi2 = psi.gate(G, site, contract="reduce-split", absorb=None, max_bond=max_bond, cutoff=cutoff, info=dict()) -t3 = time.time() -print(t1-t0, t2-t1, t3-t2) - -max_bond = 16 -print("chi=%i"%max_bond) -print("Checking split gate norm") -compute_norm(psi1, max_bond) -print("Checking reduce-split gate norm") -compute_norm(psi2, max_bond) diff --git a/quimb/tensor/test/test_norm.py b/quimb/tensor/test/test_norm.py deleted file mode 100644 index cda140e9..00000000 --- a/quimb/tensor/test/test_norm.py +++ /dev/null @@ -1,163 +0,0 @@ -import quimb as qu -import quimb.tensor as qtn -from quimb.tensor.tensor_2d import PEPS -import numpy as np -from quimb.tensor.fermion_2d import FPEPS -from quimb.tensor.fermion import _fetch_fermion_space, FermionTensor, FermionTensorNetwork -from quimb.tensor.tensor_core import oset -from pyblock3.algebra.core import SubTensor -from pyblock3.algebra.symmetry import SZ -from pyblock3.algebra.fermion import SparseFermionTensor - -Lx = 2 -Ly = 3 -D = 2 -np.random.seed(3) - -def tensor_compress_bond( - T1, - T2, - reduced=True, - absorb='both', - info=None, - **compress_opts -): - fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) - - site1, site2 = fs[tid1][1], fs[tid2][1] - - if site1 < site2: - Tl, Tr = T1, T2 - tidl, tidr = tid1, tid2 - else: - Tl, Tr = T2, T1 - tidl, tidr = tid2, tid1 - - left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] - right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] - - out = 
fs._contract_pairs(tidl, tidr, direction="left") - l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) - return l, r - -def get_err(max_bond=None): - if max_bond is None: max_bond = 2*D**2 - - - psi = FPEPS.rand(Lx, Ly, bond_dim=D, seed=666) - tsr1 = psi[0,0] - tsr2 = psi[1,0] - - for x in range(Lx): - psi.contract_between((0,x), (1,x)) - tsr1 = psi[0,0] - tsr2 = psi[0,1] - - - inds_contr = [i for i in tsr1.inds if i in tsr2.inds] - outinds = [i for i in tsr1.inds if i not in tsr2.inds] - idxa = [tsr1.inds.index(i) for i in inds_contr] - idxb = [tsr2.inds.index(i) for i in inds_contr] - - out = np.tensordot(tsr1.data, tsr2.data, axes=(idxa, idxb)) - - l, r = tensor_compress_bond(tsr1, tsr2, max_bond=max_bond) - - inds_contr = [i for i in l.inds if i in r.inds] - outinds = [i for i in l.inds if i not in r.inds] - idxa = [l.inds.index(i) for i in inds_contr] - idxb = [r.inds.index(i) for i in inds_contr] - fidx = [i for i in l.inds+r.inds if i not in inds_contr] - - out1 = np.tensordot(l.data, r.data, axes=(idxa, idxb)) - - - nblk = out.shapes.shape[0] - - err = [] - for i in range(nblk): - dlt = np.sum(abs(out.q_labels[i] - out1.q_labels), axis=1) - j = np.where(dlt==0)[0][0] - ist, ied = out.idxs[i], out.idxs[i+1] - jst, jed = out1.idxs[j], out1.idxs[j+1] - err.append(max(abs(out.data[ist:ied]-out1.data[jst:jed]))) - return max(err) - -dtype = "complex" -mat1 = np.zeros([2,2], dtype=dtype) -mat1[1,0] = mat1[0,1] = 0.5 -blk = [SubTensor(reduced=mat1, q_labels=(SZ(0), SZ(0)))] -mat1 = np.zeros([2,2],dtype=dtype) -mat1[1,0] = 2**0.5*.5j -blk += [SubTensor(reduced=mat1, q_labels=(SZ(1), SZ(1)))] - -x = FermionTensor(SparseFermionTensor(blocks=blk).to_flat(), inds=["a","b"]) - -y = x.H.data -out = np.tensordot(x.data, y, axes=((0,1),(1,0))) -print(out.data) - -L, R = x.split(left_inds=["a"], get="tensors") - -array = [[L.data,],[R.data,]] - -psi = FPEPS(array, shape="rldpu") #WARNING -ket = psi.copy() -layer_tags=('KET', 'BRA') - -ket.add_tag(layer_tags[0]) -bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) #WARNING -bra.mangle_inner_("*") - -L = ket[0,0] -R = ket[1,0] -L1 = bra[0,0] -R1 = bra[1,0] - -tn = FermionTensorNetwork((R1,L1,L,R)) -fs = tn.fermion_space -fs._contract_pairs(0,1) # WARNING -#fs._contract_pairs(1,2) -fs._contract_pairs(0,1) -out = fs._contract_pairs(0,1) - -norm = bra & ket -def contract_all(tn): - Lx, Ly = tn._Lx, tn._Ly - for i in range(Lx): - for j in range(Ly): - x1, x2 = tn[i,j] - tn.contract_between(x1.tags, x2.tags) - for i in range(Lx): - for j in range(Ly-1): - x1 = tn[i,j] - x2 = tn[i,j+1] - tn.contract_between(x1.tags, x2.tags) - for i in range(Lx-1): - x1 = tn[i,0] - x2 = tn[i+1,0] - out = tn.contract_between(x1.tags, x2.tags) - return out - -def contract_left(tn): - Lx, Ly = tn._Lx, tn._Ly - for i in range(Lx): - for j in range(Ly): - x1, x2 = tn[i,j] - tn.contract_between(x1.tags, x2.tags) - for j in range(Ly): - for i in range(Lx-1): - x1 = tn[i,j] - x2 = tn[i+1,j] - out = tn.contract_between(x1.tags, x2.tags) - for i in range(Ly-1): - x1 = tn[0,i] - x2 = tn[0,i+1] - out = tn.contract_between(x1.tags, x2.tags) - return out - -norm1 = norm.copy() -out = contract_all(norm) -print(out) -out1 = contract_left(norm1) -print(out1) diff --git a/quimb/tensor/test/test_norm_2d.py b/quimb/tensor/test/test_norm_2d.py deleted file mode 100644 index c29eb207..00000000 --- a/quimb/tensor/test/test_norm_2d.py +++ /dev/null @@ -1,72 +0,0 @@ -import quimb as qu -import numpy as np -from quimb.tensor.tensor_core import 
rand_uuid -from quimb.tensor.tensor_2d import is_lone_coo -from quimb.tensor.fermion_2d import FermionTensorNetwork2DVector, gen_mf_peps -from quimb.tensor.fermion import FermionTensorNetwork, FermionTensor, tensor_contract, FermionSpace -from quimb.tensor import fermion_ops as ops -import itertools - -def compute_env(psi, max_bond, bra=None, x_bsz=1, y_bsz=1): - ket = psi.copy() - layer_tags=('KET', 'BRA') - - ket.add_tag(layer_tags[0]) - if bra is None: bra = ket.H - bra = bra.retag_({layer_tags[0]: layer_tags[1]}) - bra.mangle_inner_("*") - - norm = ket & bra - envs = norm._compute_plaquette_environments_row_first(x_bsz=x_bsz, y_bsz=y_bsz, layer_tags=layer_tags, max_bond=max_bond) - return envs - - -def compute_expectation(env, psi, op, where, max_bond): - ket = psi.copy() - layer_tags=('KET', 'BRA') - - ket.add_tag(layer_tags[0]) - bra = ket.H.retag_({layer_tags[0]: layer_tags[1]}) - bra.mangle_inner_("*") - - if is_lone_coo(where): - where = (where,) - else: - where = tuple(where) - - ng = len(where) - site_ix = [bra.site_ind(i, j) for i, j in where] - bnds = [rand_uuid() for _ in range(ng)] - reindex_map = dict(zip(site_ix, bnds)) - - TG = FermionTensor(op.copy(), inds=site_ix+bnds, left_inds=site_ix) - newTG = bra.fermion_space.move_past(TG, (0, len(bra.fermion_space.tensor_order))) - if ng==1: - tn = env[where+((1,1),)].copy() - fs = tn.fermion_space - ntsr = len(fs.tensor_order) - for i in range(ntsr-2*ng, ntsr): - tsr = fs[i][2] - if layer_tags[0] in tsr.tags: - tsr.reindex_(reindex_map) - tn.add_tensor(newTG, virtual=True) - out = tn.contract(all, optimize='auto-hq') - return out - - - -Lx = Ly = 6 -max_bond = 8 -state_array = np.random.randint(0,4, [Lx, Ly]) -psi = gen_mf_peps(state_array) -psi.view_as_(FermionTensorNetwork2DVector, like=psi) - -U = 4. -t = 2. -uop = ops.onsite_u(U) -nop = ops.count_n() -sz = ops.measure_sz() -hop = ops.hopping(t) - -out = psi.compute_norm(max_bond=max_bond) -print(out) diff --git a/quimb/tensor/test/test_ops.py b/quimb/tensor/test/test_ops.py deleted file mode 100644 index 9a69381f..00000000 --- a/quimb/tensor/test/test_ops.py +++ /dev/null @@ -1,68 +0,0 @@ -import numpy as np -from quimb.tensor import fermion_ops as ops -from quimb.tensor.fermion_2d import gen_mf_peps, FermionTensorNetwork2DVector -from quimb.tensor.fermion import tensor_contract -from pyblock3.algebra.symmetry import SZ -from itertools import product - - -def get_state(out): - vecmap = {(SZ(0), 0): "0,", - (SZ(0), 1): "-+,", - (SZ(1), 0): "+,", - (SZ(1), 1): "-,"} - outstring = "" - for iblk in out.blocks: - data = np.asarray(iblk) - inds = np.where(abs(data)>0.) 
- for ia, ib in zip(*inds): - key1 = (iblk.q_labels[0], ia) - key2 = (iblk.q_labels[1], ib) - val = data[ia, ib] - outstring += "+ %.1f|"%(val) + vecmap[key1] + vecmap[key2].replace(',','> ') - - if outstring=="": - outstring= "|0>" - return outstring - -max_bond=4 -Lx, Ly = 1,2 - -state_array = np.random.randint(0,4,[Lx,Ly]) - -def test_hopping(ix, iy): - state_array = np.asarray([[ix,iy]]) - psi = gen_mf_peps(state_array, tags=("KET")) - psi.view_as_(FermionTensorNetwork2DVector, like=psi) - umat = ops.onsite_u(4) - nmat = ops.count_n() - zmat = ops.measure_sz() - tmat = ops.gen_h1(1) - - instate = tensor_contract(psi[0,0], psi[0,1]) - - psi1 = psi.gate(tmat.copy(), ((0,0), (0,1)), contract='split') - - outstate = tensor_contract(psi1[0,0], psi1[0,1]) - instring = get_state(instate.data.to_sparse()) - outstring = get_state(outstate.data.to_sparse()) - print("Input:", instring) - print("Output 1:", outstring) - - state = np.tensordot(psi[0,1].data, psi[0,0].data, axes=((0,),(0,))) - outstate = np.tensordot(tmat, state, axes=((2,3),(1,0))).transpose([1,0]) - print("Output 2:",get_state(outstate.to_sparse())) - - outstate = np.tensordot(tmat, state, axes=((2,3),(0,1))) - print("Output 3:",get_state(outstate.to_sparse())) - - psi1 = psi.gate(tmat.copy(), ((0,1), (0,0)), contract='reduce-split') - outstate = tensor_contract(psi1[0,0], psi1[0,1]) - outstring = get_state(outstate.data.to_sparse()) - - print("Output 4:", outstring) - -for ix, iy in product(range(4), repeat=2): - if ix==iy: continue - print("testing %i %i"%(ix, iy)) - test_hopping(ix, iy) From 5547c01372cd9f91c513cec4facdeebf0cd3540e Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 2 Feb 2021 15:23:44 -0800 Subject: [PATCH 30/61] fixup to merge with upstream --- quimb/tensor/fermion_2d.py | 38 ++++++--------------------------- quimb/tensor/fermion_2d_tebd.py | 4 ++-- 2 files changed, 9 insertions(+), 33 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 6ccfe868..8198bbbb 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -10,15 +10,15 @@ PEPS, is_lone_coo, gen_long_range_path, - plaquette_to_sites) + plaquette_to_sites, + calc_plaquette_sizes, + calc_plaquette_map) from .tensor_core import ( rand_uuid, oset, tags_to_oset, bonds ) -from .tensor_2d import calc_plaquette_sizes as _calc_plaquette_sizes -from .tensor_2d import calc_plaquette_map as _calc_plaquette_map from ..utils import check_opt, pairwise from collections import defaultdict from itertools import product @@ -29,30 +29,6 @@ INVERSE_CUTOFF = 1e-10 -def calc_plaquette_sizes(pairs, autogroup=True): - singles = [] - remainders = [] - for pair in pairs: - singles.append(is_lone_coo(pair)) - if not is_lone_coo(pair): - remainders.append(pair) - singles = (sum(singles) != 0) - if singles: - sizes = ((1,1),) - else: - sizes = () - if len(remainders) >0: - sizes += _calc_plaquette_sizes(remainders, autogroup) - return sizes - -def calc_plaquette_map(plaquettes): - plaqs = [p for p in plaquettes if p[1][0] * p[1][1]>1] - map = _calc_plaquette_map(plaqs) - for p in plaquettes: - if p[1][0] * p[1][1]==1: - map[p[0]] = p - return map - def gate_string_split_(TG, where, string, original_ts, bonds_along, reindex_map, site_ix, info, **compress_opts): # by default this means singuvalues are kept in the string 'blob' tensor @@ -720,8 +696,8 @@ def gate( # ╱ ╱ # psi.reindex_(reindex_map) - input_tid, = psi._get_tids_from_inds(bnds, which='any') - isite = psi.tensor_map[input_tid].get_fermion_info()[1] + input_tids = 
psi._get_tids_from_inds(bnds, which='any') + isite = [psi.tensor_map[itid].get_fermion_info()[1] for itid in input_tids] #psi |= TG psi.fermion_space.add_tensor(TG, virtual=True) @@ -732,7 +708,7 @@ def gate( # pop the sites, contract, then re-add pts = [psi._pop_tensor_(tid) for tid in site_tids] out = tensor_contract(*pts, TG, inplace=True) - psi.fermion_space.move(out.get_fermion_info()[0], isite) + psi.fermion_space.move(out.get_fermion_info()[0], min(isite)) psi |= out return psi @@ -825,7 +801,7 @@ def compute_local_expectation( plaquette_env_options.setdefault('layer_tags', ('KET', 'BRA')) plaquette_envs = dict() - for x_bsz, y_bsz in calc_plaquette_sizes(terms, autogroup): + for x_bsz, y_bsz in calc_plaquette_sizes(terms.keys(), autogroup): plaquette_envs.update(norm.compute_plaquette_environments( x_bsz=x_bsz, y_bsz=y_bsz, **plaquette_env_options)) diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py index 1df9780c..4424924b 100644 --- a/quimb/tensor/fermion_2d_tebd.py +++ b/quimb/tensor/fermion_2d_tebd.py @@ -5,7 +5,7 @@ from pyblock3.algebra.core import SubTensor from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor from quimb.tensor.fermion_2d import FPEPS,FermionTensorNetwork2DVector -from quimb.tensor.fermion_ops import to_exp, eye, hubbard +from quimb.tensor.fermion_ops import to_exponential, eye, hubbard from pyblock3.algebra.symmetry import SZ, BondInfo from quimb.tensor.tensor_2d_tebd import SimpleUpdate as _SimpleUpdate from quimb.tensor.tensor_2d_tebd import conditioner @@ -134,7 +134,7 @@ def _expm_cached(self, x, y): cache = self._op_cache['expm'] key = (id(x), y) if key not in cache: - out = to_exp(x, y) + out = to_exponential(x, y) cache[key] = out return cache[key] From 6e770d455455b7579112d0885c9eba4dc9067c0b Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 2 Feb 2021 15:24:39 -0800 Subject: [PATCH 31/61] add fermion unittest --- tests/test_fermion/test_fermion_2d.py | 173 ++++++++++++++++++++++++++ tests/test_fermion/test_numerics.py | 122 ++++++++++++++++++ tests/test_fermion/test_ops.py | 125 +++++++++++++++++++ 3 files changed, 420 insertions(+) create mode 100644 tests/test_fermion/test_fermion_2d.py create mode 100644 tests/test_fermion/test_numerics.py create mode 100644 tests/test_fermion/test_ops.py diff --git a/tests/test_fermion/test_fermion_2d.py b/tests/test_fermion/test_fermion_2d.py new file mode 100644 index 00000000..60bfa6fc --- /dev/null +++ b/tests/test_fermion/test_fermion_2d.py @@ -0,0 +1,173 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.fermion_2d import FPEPS +from pyblock3.algebra.fermion import SparseFermionTensor +from pyblock3.algebra.symmetry import SZ, BondInfo + +from quimb.tensor import fermion_ops as ops + + + + +class TestPEPSConstruct: + @pytest.mark.parametrize('where', [ + (0, 0), (0, 1), (0, 2), (2, 0), + (1, 0), (1, 1), (1, 2), (2, 1) + ]) + @pytest.mark.parametrize('contract', [False, True]) + def test_gate_2d_single_site(self, where, contract): + bond = BondInfo({SZ(0):2, SZ(1): 2}) + G = SparseFermionTensor.random((bond, bond)).to_flat() + Lx = 3 + Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2, seed=42, tags='KET') + xe = psi.compute_local_expectation({where: G}) + tn = psi.H & psi.gate(G, where, contract=contract) + assert len(tn.tensors) == 2 * Lx * Ly + int(not contract) + assert tn ^ all == pytest.approx(xe) + + @pytest.mark.parametrize( + 'contract', [False, True, 'split', 'reduce-split']) + @pytest.mark.parametrize('where', [ + [(1, 1), (2, 1)], [(2, 
1), (2, 2)] + ]) + def test_gate_2d_two_site(self, where, contract): + bond = BondInfo({SZ(0):2, SZ(1): 2}) + G = SparseFermionTensor.random((bond, bond,bond,bond)).to_flat() + Lx = 3 + Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2, seed=42, tags='KET') + xe = psi.compute_local_expectation({tuple(where): G}) + tn = psi.H & psi.gate(G, tuple(where), contract=contract) + change = {False: 1, True: -1, 'split': 0, 'reduce-split': 0}[contract] + assert len(tn.tensors) == 2 * Lx * Ly + change + assert tn ^ all == pytest.approx(xe) + +class Test2DContract: + def test_contract_2d_one_layer_boundary(self): + psi = FPEPS.rand(4, 4, 2, seed=42, tags='KET') + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=9) + assert xt == pytest.approx(xe, rel=1e-2) + + def test_contract_2d_two_layer_boundary(self): + psi = FPEPS.rand(4, 4, 2, seed=42, tags='KET') + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=9, layer_tags=['KET', 'BRA']) + assert xt == pytest.approx(xe, rel=1e-2) + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_row_envs(self, two_layer): + psi = FPEPS.rand(4, 2, 2, seed=42, tags='KET') + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, + 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_row_environments(**compress_opts) + + for i in range(norm.Lx): + norm_i = ( + row_envs['below', i] & + row_envs['mid', i] & + row_envs['above', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_col_envs(self, two_layer): + psi = FPEPS.rand(2, 4, 2, seed=42, tags='KET') + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, + 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_col_environments(**compress_opts) + + for i in range(norm.Ly): + norm_i = ( + row_envs['left', i] & + row_envs['mid', i] & + row_envs['right', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + def test_normalize(self): + psi = FPEPS.rand(3, 3, 2, seed=42) + norm = psi.make_norm().contract(all) + assert norm != pytest.approx(1.0) + psi.normalize_(balance_bonds=True, equalize_norms=True, cutoff=2e-3) + norm = psi.make_norm().contract(all) + assert norm == pytest.approx(1.0, rel=1e-2) + + def test_compute_local_expectation_one_sites(self): + peps = FPEPS.rand(4, 3, 2, seed=42) + coos = list(itertools.product([0, 2, 3], [0, 1, 2])) + bond = BondInfo({SZ(0):2, SZ(1): 2}) + terms = {coo: SparseFermionTensor.random((bond, bond)).to_flat() for coo in coos} + + expecs = peps.compute_local_expectation( + terms, + normalized=True, + return_all=True) + + norm = peps.compute_norm() + for where, G in terms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where) + tn = ket & bra + out = tn.contract_boundary(max_bond=12) + assert out == pytest.approx(expecs[where][0], rel=1e-2) + assert norm == pytest.approx(expecs[where][1], rel=1e-2) + + def test_compute_local_expectation_two_sites(self): + normalized=True + peps = FPEPS.rand(4, 3, 2, seed=42) + bond = BondInfo({SZ(0):2, SZ(1): 2}) + Hij = SparseFermionTensor.random((bond, bond, bond, bond)).to_flat() + 
hterms = {coos: Hij for coos in peps.gen_horizontal_bond_coos()} + vterms = {coos: Hij for coos in peps.gen_vertical_bond_coos()} + + opts = dict(cutoff=2e-3, max_bond=12, contract_optimize='random-greedy') + norm = peps.compute_norm(max_bond=12, cutoff=2e-3) + he = peps.compute_local_expectation( + hterms, normalized=normalized, return_all=True, **opts) + ve = peps.compute_local_expectation( + vterms, normalized=normalized, return_all=True, **opts) + + for where, G in hterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="reduce-split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(he[where][0], rel=1e-2) + assert norm == pytest.approx(he[where][1], rel=1e-2) + + for where, G in vterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(ve[where][0], rel=1e-2) + assert norm == pytest.approx(ve[where][1], rel=1e-2) diff --git a/tests/test_fermion/test_numerics.py b/tests/test_fermion/test_numerics.py new file mode 100644 index 00000000..f94b92c0 --- /dev/null +++ b/tests/test_fermion/test_numerics.py @@ -0,0 +1,122 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.fermion import ( + FermionTensor, FermionTensorNetwork, tensor_contract) + +from quimb.tensor.fermion_2d import ( + FPEPS, ) + +from pyblock3.algebra.symmetry import (SZ, BondInfo) +from pyblock3.algebra.fermion import SparseFermionTensor + +def get_err(A, B): + err = [] + nblk = A.shapes.shape[0] + for i in range(nblk): + dlt = np.sum(abs(A.q_labels[i] - B.q_labels), axis=1) + j = np.where(dlt==0)[0][0] + ist, ied = A.idxs[i], A.idxs[i+1] + jst, jed = B.idxs[j], B.idxs[j+1] + err.append(max(abs(A.data[ist:ied]-B.data[jst:jed]))) + return max(err) + +np.random.seed(3) +bond_1 = BondInfo({SZ(0):3, SZ(1): 2}) +bond_2 = BondInfo({SZ(0):5, SZ(1): 5}) + +abc = SparseFermionTensor.random( + (bond_2, bond_1, bond_1), dq=SZ(1)).to_flat() + +bcd = SparseFermionTensor.random( + (bond_1, bond_1, bond_1), dq=SZ(1)).to_flat() + +ega = SparseFermionTensor.random( + (bond_1, bond_1, bond_2), dq=SZ(1)).to_flat() + +deg = SparseFermionTensor.random( + (bond_1, bond_1, bond_1), dq=SZ(1)).to_flat() + +tsr_abc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) +tsr_ega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) +tsr_bcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) +tsr_deg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + +tn = FermionTensorNetwork((tsr_abc, tsr_ega, tsr_bcd, tsr_deg)) + +# Tensor Order: deg, bcd, ega, abc +# Tensor Order: 3, 2, 1, 0 + +class TestContract: + def test_backend(self): + tsr_egbc = tensor_contract(tsr_abc, tsr_ega, output_inds=("e","g","b", "c")) + + egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) + err = get_err(tsr_egbc.data, egbc) + assert err < 1e-10 + + def test_contract_between(self): + tn1 = tn.copy() + tn1.contract_between("abc", "ega") + tsr_egbc = tn1["abc"].transpose("e","g","b","c") + + egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) + err = get_err(tsr_egbc.data, egbc) + assert err < 1e-10 + + def test_contract_all(self): + result = tn.contract(all) + + egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) + deg1 = np.tensordot(bcd, egbc, axes=[(0,1),(2,3)]) + ref_val = np.tensordot(deg, deg1, 
axes=[(0,1,2),]*2).data[0] + + err = abs(result - ref_val) + assert err < 1e-10 + + def test_contract_ind(self): + tn1 = tn.copy() + tn1.contract_ind("d") + out = tn1["deg"].transpose("e","g","b","c") + + egbc = np.tensordot(deg, bcd, axes=[(0,),(2)]) + err = get_err(out.data, egbc) + assert err < 1e-10 + +class TestCompress: + def test_backend(self): + pass + + def test_compress_between(self): + pass + +class TestCanonize: + def test_backend(self): + pass + +class TestBalance: + def test_balance_bonds(self): + Lx = Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2, seed=11) + norm = psi.make_norm() + exact = norm.contract(all, optimize="auto-hq") + + psi1 = psi.balance_bonds() + norm = psi1.make_norm() + exact_bb = norm.contract(all, optimize="auto-hq") + assert exact_bb == pytest.approx(exact, rel=1e-2) + for ix, iy in itertools.product(range(Lx), range(Ly)): + assert psi[ix,iy].norm() != pytest.approx(psi1[ix,iy], rel=1e-2) + + def test_equlaize_norm(self): + Lx = Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2, seed=24) + norm = psi.make_norm() + exact = norm.contract(all, optimize="auto-hq") + + psi1 = psi.equalize_norms() + norm = psi1.make_norm() + exact_en = norm.contract(all, optimize="auto-hq") + assert exact_en == pytest.approx(exact, rel=1e-2) + for ix, iy in itertools.product(range(Lx), range(Ly)): + assert psi[ix,iy].norm() != pytest.approx(psi1[ix,iy], rel=1e-2) diff --git a/tests/test_fermion/test_ops.py b/tests/test_fermion/test_ops.py new file mode 100644 index 00000000..4d3854f1 --- /dev/null +++ b/tests/test_fermion/test_ops.py @@ -0,0 +1,125 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.fermion_2d import gen_mf_peps, FPEPS +from quimb.tensor import fermion_ops as ops +from pyblock3.algebra.symmetry import SZ +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.fermion import SparseFermionTensor + +Lx = Ly = 6 +np.random.seed(3) +state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) +psi = gen_mf_peps(state_array) + + + +class TestOperators: + def test_hopping(self): + t = 2 + hop = ops.hopping(t) + blocks=[] + states = np.zeros([2,2]) + states[0] = .5 + blocks.append(SubTensor(reduced=states, q_labels=(SZ(0), SZ(1)))) #0+, 0- + blocks.append(SubTensor(reduced=-states.T, q_labels=(SZ(1), SZ(0)))) #+0, -0, eigenstate of hopping + # psi = |0+> + |0-> - |+0> - |-0>, eigenstate of hopping(eigval = t) + ket = SparseFermionTensor(blocks=blocks).to_flat() + ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) + bra = ket.permute([1,0]).conj() + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(t, rel=1e-2) + + def test_onsite_u(self): + U = 4. + uop = ops.onsite_u(U) + terms = {coo: uop for coo in itertools.product(range(Lx), range(Ly))} + result = psi.compute_local_expectation(terms, normalized=False, return_all=True) + for ix, iy in itertools.product(range(Lx), range(Ly)): + ref = U if state_array[ix,iy]==3 else 0. 
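+            # state 3 in state_array encodes a doubly occupied site (cf. the
+            # reference tables in test_sz/test_n below); it is the only local
+            # state that pays the on-site repulsion U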
+ assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_sz(self): + sz = ops.measure_sz() + terms = {coo: sz for coo in itertools.product(range(Lx), range(Ly))} + result = psi.compute_local_expectation(terms, normalized=False, return_all=True) + ref_dic = {0:0., 1:0.5, 2:-.5, 3:0.} + for ix, iy in itertools.product(range(Lx), range(Ly)): + state = state_array[ix,iy] + ref = ref_dic[state] + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_n(self): + nop = ops.count_n() + terms = {coo: nop for coo in itertools.product(range(Lx), range(Ly))} + result = psi.compute_local_expectation(terms, normalized=False, return_all=True) + ref_dic = {0:0., 1:1, 2:1, 3:2} + for ix, iy in itertools.product(range(Lx), range(Ly)): + state = state_array[ix,iy] + ref = ref_dic[state] + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_hubbard(self): + Lx = Ly = 3 + psi = FPEPS.rand(Lx, Ly, 2) + t = 2. + U = 6. + hop = ops.hopping(t) + uop = ops.onsite_u(U) + full_terms = {(ix, iy): uop for ix, iy in itertools.product(range(Lx), range(Ly))} + hterms = {coos: hop for coos in psi.gen_horizontal_bond_coos()} + vterms = {coos: hop for coos in psi.gen_vertical_bond_coos()} + full_terms.update(hterms) + full_terms.update(vterms) + ene = psi.compute_local_expectation(full_terms, max_bond=12) + + ham = dict() + count_neighbour = lambda i,j: (i>0) + (i0) + (j Date: Wed, 3 Feb 2021 16:29:36 -0800 Subject: [PATCH 32/61] cleanup and add docstring --- quimb/tensor/fermion.py | 695 ++++++++++++++++++++++------------------ 1 file changed, 388 insertions(+), 307 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 681d07b1..aad3c4d7 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -1,23 +1,50 @@ +"""Core Fermionic TensorNetwork Module +Note: The position of Fermionic Tensors inside FermionSpace + is defined as the its distance to the ket vacuum, eg, + for |psi> = \hat{Tx} \hat{Ty} \hat{Tz} |0>, + we have the position for these tensors as + Tx:2 Ty:1 Tz:0 +""" import numpy as np import weakref import functools -from .tensor_core import (Tensor, TensorNetwork, - rand_uuid, tags_to_oset, - _parse_split_opts, - check_opt, - _VALID_SPLIT_GET) +from .tensor_core import (Tensor, TensorNetwork, rand_uuid, tags_to_oset, + _parse_split_opts, check_opt, _VALID_SPLIT_GET) from .tensor_core import tensor_contract as _tensor_contract from ..utils import oset, valmap -from .array_ops import asarray, ndim, transpose +from .array_ops import asarray, ndim +def _contract_connected(T1, T2, output_inds=None): + """Fermionic contraction of two tensors that are adjacent to each other. + Any shared indexes will be summed over. If the input fermionic tensors + do not belong to the same FermionSpace, the first tensor is assumed to + placed after the second tensor, eg \hat{T1} \hat{T2} -def _contract_connected(tsr1, tsr2, out_inds=None): - info1 = tsr1.get_fermion_info() - info2 = tsr2.get_fermion_info() - t1, t2 = tsr1, tsr2 + Parameters + ---------- + T1 : FermionTensor + The first tensor. + T2 : FermionTensor + The second tensor, with matching indices and dimensions to ``T1``. + output_inds : sequence of str + If given, the desired order of output indices, else defaults to the + order they occur in the input indices. 
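+
+    A minimal call sketch (the index names here are hypothetical)::
+
+        out = _contract_connected(T1, T2, output_inds=['a', 'd'])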
+ + Returns + ------- + scalar or FermionTensor + """ + info1 = T1.get_fermion_info() + info2 = T2.get_fermion_info() + t1, t2 = T1, T2 if info1 is not None and info2 is not None: - if info1[1] < info2[1]: - t1, t2 = tsr2, tsr1 + site1, site2 = info1[1], info2[1] + if abs(site1-site2) != 1: + raise ValueError("T1 and T2 not adjacently connected in FermionSpace") + if site1 < site2: + # if T1 is placed before T2, + # it shall be parsed as second input to tensordot backend + t1, t2 = T2, T1 ainds, binds = t1.inds, t2.inds _output_inds = [] ax_a, ax_b = [], [] @@ -30,27 +57,27 @@ def _contract_connected(tsr1, tsr2, out_inds=None): for kib, ib in enumerate(binds): if ib not in ainds: _output_inds.append(ib) - if out_inds is None: out_inds=_output_inds - if set(_output_inds) != set(out_inds): + if output_inds is None: output_inds = _output_inds + if set(_output_inds) != set(output_inds): raise TypeError("specified out_inds not allowed in tensordot, \ make sure no summation/Hadamard product appears") out = np.tensordot(t1.data, t2.data, axes=[ax_a, ax_b]) - if len(out_inds)==0: + if len(output_inds)==0: return out.data[0] - if out_inds!=_output_inds: - transpose_order = tuple([_output_inds.index(ia) for ia in out_inds]) + if output_inds!=_output_inds: + transpose_order = tuple([_output_inds.index(ia) for ia in output_inds]) out = out.transpose(transpose_order) - o_tags = oset.union(*(tsr1.tags, tsr2.tags)) - out = FermionTensor(out, inds=out_inds, tags=o_tags) + o_tags = oset.union(*(T1.tags, T2.tags)) + out = FermionTensor(out, inds=output_inds, tags=o_tags) return out -def _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds=None, direction='left'): +def _contract_pairs(fs, tid_or_site1, tid_or_site2, output_inds=None, direction='left'): """ Perform pairwise contraction for two tensors in a specified fermion space. - If the two tensors are not adjacent, move one of the tensors in the given direction. - Note this could alter the tensors that are in between the two tensors in the fermion space + If the two tensors are not adjacent, move one of the tensors in the given direction. 
+ Note this could alter the tensors that are in between the two tensors in the fermion space Parameters ---------- @@ -60,20 +87,20 @@ def _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds=None, direction='le The string that specifies the id for the first tensor or the site for the first tensor tid_or_site2: a string or an integer The string that specifies the id for the 2nd tensor or the site for the 2nd tensor - out_inds: a list of strings + output_inds: a list of strings The list that specifies the output indices and its order direction: string "left" or "right" The direction to move tensors if the two tensors are not adjacent Returns ------- - out : a FermionTensor object or a number + scalar or FermionTensor """ - site1 = fs[tid_or_site1][1] - site2 = fs[tid_or_site2][1] + tid1, site1, tsr1 = fs[tid_or_site1] + tid2, site2, tsr2 = fs[tid_or_site2] - if not fs.is_adjacent(tid_or_site1, tid_or_site2): - fs.make_adjacent(tid_or_site1, tid_or_site2, direction) + if not fs.is_adjacent(tid1, tid2): + fs.make_adjacent(tid1, tid2, direction) if direction=="left": site1 = min(site1, site2) @@ -81,15 +108,13 @@ def _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds=None, direction='le site1 = max(site1, site2) - 1 site2 = site1 + 1 - tsr1 = fs[site1][2] - tsr2 = fs[site2][2] - return _contract_connected(tsr1, tsr2, out_inds) + return _contract_connected(tsr1, tsr2, output_inds) def _fetch_fermion_space(*tensors, inplace=True): """ Retrieve the FermionSpace and the associated tensor_ids for the tensors. - If the given tensors all belong to the same FermionSpace object (fsobj), - the underlying fsobj will be returned. Otherwise, a new fsobj will be created, - and the tensors will be placed in the same order as the input tensor list/tuple. + If the given tensors all belong to the same FermionSpace object (fsobj), + the underlying fsobj will be returned. Otherwise, a new FermionSpace will be created, + and the tensors will be placed in the same order as the input tensors. Parameters ---------- @@ -98,12 +123,6 @@ def _fetch_fermion_space(*tensors, inplace=True): inplace: bool if not true, a new FermionSpace will be created with all tensors copied. so subsequent operations on the fsobj will not alter the input tensors. - tid_or_site2: a string or an integer - The string that specifies the id for the 2nd tensor or the site for the 2nd tensor - out_inds: a list of strings - The list that specifies the output indices and its order - direction: string "left" or "right" - The direction to move tensors if the two tensors are not adjacent Returns ------- @@ -117,7 +136,7 @@ def _fetch_fermion_space(*tensors, inplace=True): fs = tensors[0].fermion_owner[1]() if not inplace: fs = fs.copy() - tid_lst = [tsr.fermion_owner[2] for tsr in tensors] + tid_lst = [tsr.get_fermion_info()[0] for tsr in tensors] else: fs = FermionSpace() for tsr in tensors: @@ -125,11 +144,12 @@ def _fetch_fermion_space(*tensors, inplace=True): tid_lst = list(fs.tensor_order.keys()) return fs, tid_lst -def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False, **contract_opts): - """ Perform tensor contractions for all the given tensors. - If input tensors do not belong to the same underlying fsobj, - the position of each tensor will be the same as its order in the input tensor tuple/list. 
- Note summation and Hadamard product not supported as it's not well defined for fermionic tensors +def tensor_contract(*tensors, output_inds=None, + direction="left", inplace=False, **contract_opts): + """ Perform tensor contractions for all given tensors. + If input tensors do not belong to the same underlying fsobj, + the position of each tensor will be the same as its order in the input tensor tuple/list. + Summation and Hadamard product not supported as it's not well defined for fermionic tensors Parameters ---------- @@ -138,7 +158,7 @@ def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False, output_inds: a list of strings direction: string "left" or "right" The direction to move tensors if the two tensors are not adjacent - inplace: bool + inplace: bool, optional whether to move/contract tensors in place. Returns @@ -170,8 +190,98 @@ def tensor_contract(*tensors, output_inds=None, direction="left", inplace=False, out = out.transpose(*output_inds, inplace=True) return out -def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=None, cutoff=1e-10, - cutoff_mode='rel', renorm=None, ltags=None, rtags=None, stags=None, bond_ind=None, right_inds=None): +def tensor_split( + T, + left_inds, + method='svd', + get=None, + absorb='both', + max_bond=None, + cutoff=1e-10, + cutoff_mode='rel', + renorm=None, + ltags=None, + rtags=None, + stags=None, + bond_ind=None, + right_inds=None +): + """Decompose this Fermionic tensor into two fermionic tensors. + + Parameters + ---------- + T : FermionTensor + The fermionic tensor to split. + left_inds : str or sequence of str + The index or sequence of inds, which ``T`` should already have, to + split to the 'left'. You can supply ``None`` here if you supply + ``right_inds`` instead. + method : str, optional + How to split the tensor, only some methods allow bond truncation: + + - ``'svd'``: full SVD, allows truncation. + + get : {None, 'arrays', 'tensors', 'values'} + If given, what to return instead of a TN describing the split: + + - ``None``: a tensor network of the two (or three) tensors. + - ``'arrays'``: the raw data arrays (pyblock3.algebra.fermion.FlatFermionTensor) as + a tuple ``(l, r)`` or ``(l, s, r)`` depending on ``absorb``. + - ``'tensors '``: the new tensors as a tuple ``(Tl, Tr)`` or + ``(Tl, Ts, Tr)`` depending on ``absorb``. + - ``'values'``: only compute and return the singular values ``s``. + + absorb : {'both', 'left', 'right', None}, optional + Whether to absorb the singular values into both, the left, or the right + unitary matrix respectively, or neither. If neither (``absorb=None``) + then the singular values will be returned separately as a 2D FermionTensor. + If ``get='tensors'`` or ``get='arrays'`` then a tuple like + ``(left, s, right)`` is returned. + max_bond : None or int + If integer, the maxmimum number of singular values to keep, regardless + of ``cutoff``. + cutoff : float, optional + The threshold below which to discard singular values, only applies to + rank revealing methods (not QR, LQ, or cholesky). + cutoff_mode : {'sum2', 'rel', 'abs', 'rsum2'} + Method with which to apply the cutoff threshold: + + - ``'rel'``: values less than ``cutoff * s[0]`` discarded. + - ``'abs'``: values less than ``cutoff`` discarded. + - ``'sum2'``: sum squared of values discarded must be ``< cutoff``. + - ``'rsum2'``: sum squared of values discarded must be less than + ``cutoff`` times the total sum of squared values. + - ``'sum1'``: sum values discarded must be ``< cutoff``. 
+ - ``'rsum1'``: sum of values discarded must be less than + ``cutoff`` times the total sum of values. + + renorm : {None, bool, or int}, optional + Whether to renormalize the kept singular values, assuming the bond has + a canonical environment, corresponding to maintaining the Frobenius + norm or trace. If ``None`` (the default) then this is automatically + turned on only for ``cutoff_method in {'sum2', 'rsum2', 'sum1', + 'rsum1'}`` with ``method in {'svd', 'eig', 'eigh'}``. + ltags : sequence of str, optional + Add these new tags to the left tensor. + rtags : sequence of str, optional + Add these new tags to the right tensor. + stags : sequence of str, optional + Add these new tags to the singular value tensor. + bond_ind : str, optional + Explicitly name the new bond, else a random one will be generated. + right_inds : sequence of str, optional + Explicitly give the right indices, otherwise they will be worked out. + This is a minor performance feature. + + Returns + ------- + FermionTensorNetwork or tuple[FermionTensor] or tuple[array] or 1D-array + Depending on if ``get`` is ``None``, ``'tensors'``, ``'arrays'``, or + ``'values'``. In the first three cases, if ``absorb`` is set, then the + returned objects correspond to ``(left, right)`` whereas if + ``absorb=None`` the returned objects correspond to + ``(left, singular_values, right)``. + """ check_opt('get', get, _VALID_SPLIT_GET) if left_inds is None: @@ -223,6 +333,24 @@ def tensor_split(T, left_inds, method='svd', get=None, absorb='both', max_bond=N return FermionTensorNetwork(tensors, check_collisions=False, virtual=True) def _compress_connected(Tl, Tr, absorb='both', **compress_opts): + """Compression of two Fermionic tensors that are adjacent to each other. + + Parameters + ---------- + Tl : FermionTensor + The left tensor. + Tr : FermionTensor + The right tensor, with matching indices and dimensions to ``T1``. + absorb : {'both', 'left', 'right', None}, optional + Where to absorb the singular values after decomposition. + compress_opts : + Supplied to :func:`~quimb.tensor.fermion.tensor_split`. + + Returns + ------- + two fermionic Tensors + """ + if Tl.inds == Tr.inds: return Tl, Tr left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] @@ -233,9 +361,11 @@ def _compress_connected(Tl, Tr, absorb='both', **compress_opts): absorb = "right" elif absorb == "right": absorb = "left" - r, l = out.split(left_inds=right_inds, right_inds=left_inds, absorb=absorb, get="tensors", **compress_opts) + r, l = out.split(left_inds=right_inds, right_inds=left_inds, + absorb=absorb, get="tensors", **compress_opts) else: - l, r = out.split(left_inds=left_inds, right_inds=right_inds, absorb=absorb, get="tensors", **compress_opts) + l, r = out.split(left_inds=left_inds, right_inds=right_inds, + absorb=absorb, get="tensors", **compress_opts) return l, r def tensor_compress_bond( @@ -246,17 +376,49 @@ def tensor_compress_bond( info=None, **compress_opts ): + """compress between the two single fermionic tensors. + + Parameters + ---------- + T1 : FermionTensor + The left tensor. + T2 : FermionTensor + The right tensor. + absorb : {'both', 'left', 'right', None}, optional + Where to absorb the singular values after decomposition. + info : None or dict, optional + A dict for returning extra information such as the singular values. + compress_opts : + Supplied to :func:`~quimb.tensor.fermion.tensor_split`. 
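+
+    A minimal usage sketch, assuming ``T1`` and ``T2`` are FermionTensors
+    sharing a bond and living in the same FermionSpace::
+
+        tensor_compress_bond(T1, T2, max_bond=10, absorb='both')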
+ """ fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=inplace) site1, site2 = fs[tid1][1], fs[tid2][1] fs.make_adjacent(tid1, tid2) l, r = _compress_connected(T1, T2, absorb, **compress_opts) T1.modify(data=l.data, inds=l.inds) T2.modify(data=r.data, inds=r.inds) - fs.move(tid1, site1) - fs.move(tid2, site2) + tid_map = {tid1: site1, tid2:site2} + fs._reorder_from_dict(tid_map) return T1, T2 def _canonize_connected(T1, T2, absorb='right', **split_opts): + """Compression of two Fermionic tensors that are adjacent to each other. + + Parameters + ---------- + T1 : FermionTensor + The left tensor. + T2 : FermionTensor + The right tensor, with matching indices and dimensions to ``T1``. + absorb : {'both', 'left', 'right', None}, optional + Where to absorb the singular values after decomposition. + split_opts : + Supplied to :func:`~quimb.tensor.fermion.tensor_split`. + + Returns + ------- + two fermionic Tensors + """ if absorb == 'both': return _compress_connected(T1, T2, absorb=absorb, **split_opts) if absorb == "left": @@ -279,6 +441,19 @@ def _canonize_connected(T1, T2, absorb='right', **split_opts): return new_T1, new_T2 def tensor_canonize_bond(T1, T2, absorb='right', **split_opts): + r"""Inplace 'canonization' of two fermionic tensors. This gauges the bond + between the two such that ``T1`` is isometric + + Parameters + ---------- + T1 : FermionTensor + The tensor to be isometrized. + T2 : FermionTensor + The tensor to absorb the R-factor into. + split_opts + Supplied to :func:`~quimb.tensor.fermion.tensor_split`, with + modified defaults of ``method=='svd'`` and ``absorb='right'``. + """ check_opt('absorb', absorb, ('left', 'both', 'right')) if absorb == 'both': @@ -291,22 +466,34 @@ def tensor_canonize_bond(T1, T2, absorb='right', **split_opts): l, r = _canonize_connected(T1, T2, absorb, **split_opts) T1.modify(data=l.data, inds=l.inds) T2.modify(data=r.data, inds=r.inds) - fs.move(tid1, site1) - fs.move(tid2, site2) - return T1, T2 + tid_map = {tid1: site1, tid2:site2} + fs._reorder_from_dict(tid_map) def tensor_balance_bond(t1, t2, smudge=1e-6): + """Gauge the bond between two tensors such that the norm of the 'columns' + of the tensors on each side is the same for each index of the bond. + + Parameters + ---------- + t1 : FermionTensor + The first tensor, should share a single index with ``t2``. + t2 : FermionTensor + The second tensor, should share a single index with ``t1``. + smudge : float, optional + Avoid numerical issues by 'smudging' the correctional factor by this + much - the gauging introduced is still exact. + """ from pyblock3.algebra.core import SubTensor from pyblock3.algebra.fermion import SparseFermionTensor ix, = t1.bonds(t2) t1H = t1.H.reindex_({ix: ix+'*'}) t2H = t2.H.reindex_({ix: ix+'*'}) - out = tensor_contract(t1H, t1) - out1 = tensor_contract(t2H, t2) + out1 = _contract_connected(t1H, t1) + out2 = _contract_connected(t2H, t2) sblk1 = [] sblk2 = [] - for iblk1 in out.data.to_sparse(): - for iblk2 in out1.data.to_sparse(): + for iblk1 in out1.data.to_sparse(): + for iblk2 in out2.data.to_sparse(): if iblk1.q_labels != iblk2.q_labels: continue x = np.diag(np.asarray(iblk1)) @@ -322,22 +509,27 @@ def tensor_balance_bond(t1, t2, smudge=1e-6): class FermionSpace: """A labelled, ordered dictionary. The tensor labels point to the tensor - and its position inside the fermion space. + and its position inside the fermion space. 
     Parameters
     ----------
-    tensor_order : dictionary
+    tensor_order : dictionary, optional
         tensor_order[tid] = (tensor, site)
+    virtual: bool, optional
+        whether the FermionSpace should be a *view* onto the tensors it is
+        given, or a copy of them.
+
+    Attributes
+    ----------
+    tensor_order : dict
+        Mapping of unique ids to tensors and their locations, like
+        ``{tensor_id: (tensor, site), ...}``. I.e. this is where the tensors
+        are 'stored' by the FermionSpace.
     """

     def __init__(self, tensor_order=None, virtual=True):
         self.tensor_order = {}
         if tensor_order is not None:
-            if virtual:
-                self.tensor_order = tensor_order
-            else:
-                for tid, (tsr, site) in tensor_order.items():
-                    self.add_tensor(tsr, tid, site, virtual=virtual)
+            for tid, (tsr, site) in tensor_order.items():
+                self.add_tensor(tsr, tid, site, virtual=virtual)

     @property
     def sites(self):
@@ -360,36 +552,26 @@ def is_continuous(self):
         return len(sites) == (max(sites)-min(sites)+1)

     def copy(self):
-        """ Copy the Fermion Space object. Tensor_ids and positions will be
-        preserved and tensors will be copied
-        """
-        new_fs = FermionSpace(self.tensor_order, virtual=False)
-        return new_fs
-
-    def to_tensor_network(self, site_lst=None):
-        """ Construct a inplace FermionTensorNetwork obj with tensors at given sites
+        """ Copy the FermionSpace object. Tensor ids and positions will be
+        preserved and tensors will be copied.
         """
-        if site_lst is None:
-            tsrs = tuple([tsr for (tsr, _) in self.tensor_order.values()])
-        else:
-            tsrs = tuple([tsr for (tsr, site) in self.tensor_order.values() if site in site_lst])
-        return FermionTensorNetwork(tsrs, virtual=True)
+        return FermionSpace(self.tensor_order, virtual=False)

     def add_tensor(self, tsr, tid=None, site=None, virtual=False):
         """ Add a tensor to the current FermionSpace, eg
-        01234            0123456
-        XXXXX, (6, B) -> XXXXX-B
+        01234            0123456
+        XXXXX, (6, B) -> XXXXX-B

         Parameters
         ----------
-        tsr : FermionTensor obj
-            The desired output sequence of indices.
+        tsr : FermionTensor
+            The fermionic tensor to operate on
         tid : string, optional
-            The desired tensor label
+            tensor id
         site: int or None, optional
             The position to place the tensor.
Tensor will be - appended if not specified - virtual: bool + appended to last position if not specified + virtual: bool, optional whether to add the tensor inplace """ @@ -406,10 +588,21 @@ def add_tensor(self, tsr, tid=None, site=None, virtual=False): def replace_tensor(self, site, tsr, tid=None, virtual=False): """ Replace the tensor at a given site, eg - 0123456789 0123456789 - XXXXAXXXXX, (4, B) -> XXXXBXXXXX + 0123456789 0123456789 + XXXXAXXXXX, (4, B) -> XXXXBXXXXX + + Parameters + ---------- + site: int + The position to replace the tensor + tsr : FermionTensor + The fermionic tensor to operate on + tid : string, optional + rename a new tensor id if provided + virtual: bool, optional + whether to replace the tensor inplace """ - atid, site, atsr = self[site] + atid, _, atsr = self[site] T = tsr if virtual else tsr.copy() if tid is None or (tid in self.tensor_order.keys() and tid != atid): tid = atid @@ -421,9 +614,20 @@ def replace_tensor(self, site, tsr, tid=None, virtual=False): def insert_tensor(self, site, tsr, tid=None, virtual=False): """ insert a tensor at a given site, all tensors afterwards - will be shifted by 1 to the right, eg, - 012345678 0123456789 - ABCDEFGHI, (4, X) -> ABCDXEFGHI + will be shifted forward by 1, eg, + 012345678 0123456789 + ABCDEFGHI, (4, X) -> ABCDXEFGHI + + Parameters + ---------- + site: int + The position to insert the tensor + tsr : FermionTensor + The fermionic tensor to operate on + tid : string, optional + rename a new tensor id if provided + virtual: bool, optional + whether to insert the tensor inplace """ if (tid is None) or (tid in self.tensor_order.keys()): tid = rand_uuid(base="_T") @@ -438,13 +642,32 @@ def insert_tensor(self, site, tsr, tid=None, virtual=False): self.tensor_order.update({tid: (T, site)}) - def insert(self, site, *tsr, virtual=False): - for T in tsr: + def insert(self, site, *tsrs, virtual=False): + """ insert a group of tensors at a given site, all tensors afterwards + will be shifted forward accordingly, eg, + 0123456 0123456789 + ABCDEFG, (4, (X,Y,Z)) -> ABCDXYZEFG + + Parameters + ---------- + site: int + The position to begin inserting the tensor + tsrs : a tuple/list of FermionTensor + The fermionic tensors to operate on + virtual: bool, optional + whether to insert the tensors inplace + """ + for T in tsrs: self.insert_tensor(site, T, virtual=virtual) site += 1 def get_tid(self, site): """ Return the tensor id at given site + + Parameters + ---------- + site: int + The position to obtain the tensor id """ if site not in self.sites: raise KeyError("site:%s not occupied"%site) @@ -452,22 +675,33 @@ def get_tid(self, site): return list(self.tensor_order.keys())[idx] def _reorder_from_dict(self, tid_map): + """ Reorder tensors from a tensor_id/position mapping. 
+ Pizorn algorithm will be applied during moving + + Parameters + ---------- + tid_map: dictionary + Mapping of tensor id to the desired location + """ + tid_lst = list(tid_map.keys()) des_sites = list(tid_map.values()) + # sort the destination sites to avoid cross-overs during moving work_des_sites = sorted(des_sites)[::-1] for isite in work_des_sites: ind = des_sites.index(isite) self.move(tid_lst[ind], isite) - def is_adjacent(self, tid_or_site1, tid_or_site2): + def is_adjacent(self, tid1, tid2): """ Check whether two tensors are adjacently placed in the space """ - site1 = self[tid_or_site1][1] - site2 = self[tid_or_site2][1] - distance = abs(site1-site2) - return distance == 1 + site1 = self.tensor_order[tid1][1] + site2 = self.tensor_order[tid2][1] + return abs(site1-site2) == 1 def __getitem__(self, tid_or_site): + """Return a tuple of (tensor id, position, tensor) from the tag (tensor id or position) + """ if isinstance(tid_or_site, str): if tid_or_site not in self.tensor_order.keys(): raise KeyError("tid:%s not found"%tid_or_site) @@ -489,8 +723,17 @@ def __setitem__(self, site, tsr): self.add_tensor(site, tsr) def move(self, tid_or_site, des_site): - '''Both local and global phase factorized to the tensor that's being operated on - ''' + """ Move a tensor inside this FermionSpace to the specified position with Pizorn algorithm. + Both local and global phase will be factorized to this single tensor + + Parameters + ---------- + tid_or_site: string or int + id or position of the original tensor + des_site: int + the position to move the tensor to + """ + tid, site, tsr = self[tid_or_site] if site == des_site: return move_left = (des_site < site) @@ -513,8 +756,20 @@ def move(self, tid_or_site, des_site): self.tensor_order[tid] = (tsr, des_site) def move_past(self, tsr, site_range=None): + """ Move an external tensor past the specifed site ranges with Pizorn algorithm. + Both local and global phase will be factorized to the external tensor. 
+        The external tensor will not be added to this FermionSpace
+
+        Parameters
+        ----------
+        tsr: FermionTensor
+            the external tensor to be moved past the given sites
+        site_range: a tuple of integers, optional
+            the range of the tensors to move past, if not specified, will be the whole space
+        """
         if site_range is None:
-            site_range = (0, len(self.tensor_order))
+            sites = self.sites
+            site_range = (min(sites), max(sites)+1)
         start, end = site_range
         iterator = range(start, end)
         shared_inds = []
@@ -530,12 +785,12 @@ def move_past(self, tsr, site_range=None):
         if len(axes)>0: tsr.data._local_flip(axes)
         return tsr

-    def make_adjacent(self, tid_or_site1, tid_or_site2, direction='left'):
+    def make_adjacent(self, tid1, tid2, direction='left'):
         """ Move one tensor in the specified direction to make the two adjacent
         """
-        if not self.is_adjacent(tid_or_site1, tid_or_site2):
-            site1 = self[tid_or_site1][1]
-            site2 = self[tid_or_site2][1]
+        if not self.is_adjacent(tid1, tid2):
+            site1 = self.tensor_order[tid1][1]
+            site2 = self.tensor_order[tid2][1]
             if site1 == site2: return
             sitemin, sitemax = min(site1, site2), max(site1, site2)
             if direction == 'left':
@@ -551,19 +806,19 @@ def _contract_pairs(self, tid_or_site1, tid_or_site2, out_inds=None, direction='
         Parameters
         ----------
         tid_or_site1 : string or int
-            Tensor_id or position for the 1st tensor
+            Tensor id or position for the 1st tensor
         tid_or_site2 : string or int
-            Tensor_id or position for the 2nd tensor
+            Tensor id or position for the 2nd tensor
         out_inds: list of string, optional
             The order for the desired output indices
         direction: string
             The direction to move tensors if the two are not adjacent
         inplace: bool
-            Whether to contract/move tensors inplace or in a copied fermionspace
+            Whether to contract/move tensors inplace or in a copied FermionSpace

         Returns
         -------
-        out : a FermionTensor object or a number
+        scalar or a FermionTensor
         """
         fs = self if inplace else self.copy()
         out = _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds, direction)
@@ -585,93 +840,34 @@ def _contract_pairs(self, tid_or_site1, tid_or_site2, out_inds=None, direction='
         return out

-    def remove_tensor(self, tid_or_site, inplace=True):
+    def remove_tensor(self, tid_or_site):
         """ remove a specified tensor at a given site, eg
-        012345 01234
-        ABCDEF, (3, True) -> ABCEF
-
-        012345 012345
-        ABCDEF, (3, False) -> ABC-EF
+        012345        01234
+        ABCDEF, 3  -> ABCEF
         """
         tid, site, tsr = self[tid_or_site]
         tsr.remove_fermion_owner()
         del self.tensor_order[tid]
-        if inplace:
-            indent_sites = []
-            for isite in self.sites:
-                if isite > site:
-                    indent_sites.append(isite)
-            indent_sites = sorted(indent_sites)
-            tid_lst = [self.get_tid(isite) for isite in indent_sites]
-            for tid in tid_lst:
-                tsr, site = self.tensor_order[tid]
-                self.tensor_order[tid] = (tsr, site-1)
-
-    def compress_space(self):
-        """ if the space is not continously occupied, compress it, eg,
-        012345678    01234
-        -A--B-CDE -> ABCDE
-        """
-        sites = self.sites
-        if min(sites) ==0 and self.is_continuous():
-            return
-        for tid, (tsr, site) in self.tensor_order.items():
-            isite = sum(sites<site)
+        indent_sites = []
+        for isite in self.sites:
+            if isite > site:
+                indent_sites.append(isite)
+        indent_sites = sorted(indent_sites)
+        tid_lst = [self.get_tid(isite) for isite in indent_sites]
+        for tid in tid_lst:
+            tsr, site = self.tensor_order[tid]
+            self.tensor_order[tid] = (tsr, site-1)

     @property
     def H(self):
-        """ Construct a FermionSpace for the ket state of the tensors
+        """ Construct a FermionSpace for the bra state of the tensors
         """
         max_site = max(self.sites)
         new_fs = FermionSpace()
         for tid, (tsr, site) in
self.tensor_order.items(): T = tsr.copy() reverse_order = list(range(tsr.ndim))[::-1] - x = T.data.permute(reverse_order) new_data = T.data.permute(reverse_order).conj() new_inds = T.inds[::-1] T.modify(data=new_data, inds=new_inds) @@ -686,7 +882,7 @@ class FermionTensor(Tensor): Parameters ---------- - data : numpy.ndarray + data : pyblock3.algebra.fermion.FlatFermionTensor The n-dimensional data. inds : sequence of str The index labels for each dimension. Must match the number of @@ -699,24 +895,18 @@ class FermionTensor(Tensor): matrix. This can be useful, for example, when automatically applying unitary constraints to impose a certain flow on a tensor network but at the atomistic (Tensor) level. - fermion_owner: a tuple with mixed data type, optional - (hash value, fsobj weak reference, tensor_id). The first one is the hash - value (int) of the fsobj it's point to. The second is the weak reference - to the fsobj, and the third is the tensor_id(string) for its label """ - def __init__(self, data=1.0, inds=(), tags=None, left_inds=None, fermion_owner=None): + def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): # a new or copied Tensor always has no owners self._owners = dict() - + self._fermion_owner = None # Short circuit for copying Tensors if isinstance(data, self.__class__): self._data = data.data.copy() self._inds = data.inds self._tags = data.tags.copy() self._left_inds = data.left_inds - # copied Fermion Tensor points to no fermion space - self._fermion_owner = None return self._data = data # asarray(data) @@ -734,8 +924,6 @@ def __init__(self, data=1.0, inds=(), tags=None, left_inds=None, fermion_owner=N raise ValueError(f"The 'left' indices {self.left_inds} are not " f"found in {self.inds}.") - self._fermion_owner = fermion_owner - @property def fermion_owner(self): return self._fermion_owner @@ -898,7 +1086,7 @@ def squeeze(self, inplace=False): def norm(self): """Frobenius norm of this tensor. """ - return np.linalg.norm(self.data.data) + return np.linalg.norm(self.data.data, 2) def symmetrize(self, ind1, ind2, inplace=False): raise NotImplementedError @@ -1346,24 +1534,10 @@ def partition(self, tags, which='any', inplace=False): tagged_tids = self._get_tids_from_tags(tags, which=which) kws = {'check_collisions': False} - - if inplace: - t1 = self - t2s = [t1._pop_tensor_(tid) for tid in tagged_tids] - t2 = FermionTensorNetwork(t2s, **kws) - t2.view_like_(self) - - else: # rebuild both -> quicker - new_fs = self.fermion_space.copy() - t1_site = [] - t2_site = [] - for tid in self.tensor_map.keys(): - (t2_site if tid in tagged_tids else t1_site).append(self.fermion_space[tid][1]) - t1 = new_fs.to_tensor_network(t1_site) - t2 = new_fs.to_tensor_network(t2_site) - t1.view_like_(self) - t2.view_like_(self) - + t1 = self if inplace else self.copy() + t2s = [t1._pop_tensor_(tid) for tid in tagged_tids] + t2 = FermionTensorNetwork(t2s, **kws) + t2.view_like_(self) return t1, t2 def replace_with_svd(self, where, left_inds, eps, *, which='any', @@ -1371,100 +1545,7 @@ def replace_with_svd(self, where, left_inds, eps, *, which='any', absorb='both', cutoff_mode='rel', renorm=None, ltags=None, rtags=None, keep_tags=True, start=None, stop=None, inplace=False): - r"""Replace all tensors marked by ``where`` with an iteratively - constructed SVD. E.g. 
if ``X`` denote ``where`` tensors:: - - :__ ___: - ---X X--X X--- : \ / : - | | | | ==> : U~s~VH---: - ---X--X--X--X--- :__/ \ : - | +--- : \__: - X left_inds : - right_inds - - Parameters - ---------- - where : tag or seq of tags - Tags specifying the tensors to replace. - left_inds : ind or sequence of inds - The indices defining the left hand side of the SVD. - eps : float - The tolerance to perform the SVD with, affects the number of - singular values kept. See - :func:`quimb.linalg.rand_linalg.estimate_rank`. - which : {'any', 'all', '!any', '!all'}, optional - Whether to replace tensors matching any or all the tags ``where``, - prefix with '!' to invert the selection. - right_inds : ind or sequence of inds, optional - The indices defining the right hand side of the SVD, these can be - automatically worked out, but for hermitian decompositions the - order is important and thus can be given here explicitly. - method : str, optional - How to perform the decomposition, if not an iterative method - the subnetwork dense tensor will be formed first, see - :func:`~quimb.tensor.tensor_core.tensor_split` for options. - max_bond : int, optional - The maximum bond to keep, defaults to no maximum (-1). - ltags : sequence of str, optional - Tags to add to the left tensor. - rtags : sequence of str, optional - Tags to add to the right tensor. - keep_tags : bool, optional - Whether to propagate tags found in the subnetwork to both new - tensors or drop them, defaults to ``True``. - start : int, optional - If given, assume can use ``TNLinearOperator1D``. - stop : int, optional - If given, assume can use ``TNLinearOperator1D``. - inplace : bool, optional - Perform operation in place. - - Returns - ------- - - See Also - -------- - replace_with_identity - """ - leave, svd_section = self.partition(where, which=which, - inplace=inplace) - - tags = svd_section.tags if keep_tags else oset() - ltags = tags_to_oset(ltags) - rtags = tags_to_oset(rtags) - - if right_inds is None: - # compute - right_inds = tuple(i for i in svd_section.outer_inds() - if i not in left_inds) - - if (start is None) and (stop is None): - A = svd_section.aslinearoperator(left_inds=left_inds, - right_inds=right_inds) - else: - from .tensor_1d import TNLinearOperator1D - - # check if need to invert start stop as well - if '!' 
in which: - start, stop = stop, start + self.L - left_inds, right_inds = right_inds, left_inds - ltags, rtags = rtags, ltags - - A = TNLinearOperator1D(svd_section, start=start, stop=stop, - left_inds=left_inds, right_inds=right_inds) - - ltags = tags | ltags - rtags = tags | rtags - - TL, TR = tensor_split(A, left_inds=left_inds, right_inds=right_inds, - method=method, cutoff=eps, absorb=absorb, - max_bond=max_bond, cutoff_mode=cutoff_mode, - renorm=renorm, ltags=ltags, rtags=rtags) - - leave |= TL - leave |= TR - - return leave + raise NotImplementedError def contract_between(self, tags1, tags2, **contract_opts): """Contract the two tensors specified by ``tags1`` and ``tags2`` From 24867b3475d850f09073570af6f81583b0b515eb Mon Sep 17 00:00:00 2001 From: yangcal Date: Thu, 4 Feb 2021 15:22:58 -0800 Subject: [PATCH 33/61] bugfix, cleanup and adding docstring --- quimb/tensor/fermion.py | 124 ++++++++++++++++++++++++------------- quimb/tensor/fermion_2d.py | 29 +++++---- 2 files changed, 95 insertions(+), 58 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index aad3c4d7..31805e7b 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -129,18 +129,32 @@ def _fetch_fermion_space(*tensors, inplace=True): fs : a FermionSpace object tid_lst: a list of strings for the tensor_ids """ - if isinstance(tensors, FermionTensor): + if isinstance(tensors, (FermionTensor, FermionTensorNetwork)): tensors = (tensors, ) if is_mergeable(*tensors): - fs = tensors[0].fermion_owner[1]() + if isinstance(tensors[0], FermionTensor): + fs = tensors[0].fermion_owner[1]() + else: + fs = tensors[0].fermion_space if not inplace: fs = fs.copy() - tid_lst = [tsr.get_fermion_info()[0] for tsr in tensors] + tid_lst = [] + for tsr_or_tn in tensors: + if isinstance(tsr_or_tn, FermionTensor): + tid_lst.append(tsr_or_tn.get_fermion_info()[0]) + else: + tid_lst.append(tsr_or_tn.tensor_map.keys()) else: fs = FermionSpace() - for tsr in tensors: - fs.add_tensor(tsr, virtual=inplace) + for tsr_or_tn in tensors: + if isinstance(tsr_or_tn, FermionTensor): + fs.add_tensor(tsr_or_tn, virtual=inplace) + elif isinstance(tsr_or_tn, FermionTensorNetwork): + if not tsr_or_tn.is_continuous(): + raise ValueError("Input Network not continous, merge not allowed") + for itsr in tsr_or_tn: + fs.add_tensor(itsr, virtual=inplace) tid_lst = list(fs.tensor_order.keys()) return fs, tid_lst @@ -966,7 +980,7 @@ def multiply_index_diagonal(self, ind, x, inplace=False, location="front"): """ if location not in ["front", "back"]: raise ValueError("invalid for the location of the diagonal") - t = self if inplace else self.copy() + t = self if inplace else self.copy(full=True) ax = t.inds.index(ind) if isinstance(x, FermionTensor): x = x.data @@ -1027,7 +1041,9 @@ def split(self, *args, **kwargs): return tensor_split(self, *args, **kwargs) def transpose(self, *output_inds, inplace=False): - """Transpose this tensor. + """Transpose this tensor. 
This does not change the physical meaning of + the operator represented, eg: + T_{abc}a^{\dagger}b^{\dagger}c^{\dagger} = \tilda{T}_{cab}c^{\dagger}a^{\dagger}b^{\dagger} Parameters ---------- @@ -1063,8 +1079,9 @@ def transpose(self, *output_inds, inplace=False): @property def H(self): - """Return the ket of this tensor, this is different from Fermionic transposition - U_{abc} a^{\dagger}b^{\dagger}c^{\dagger} -> U^{cba\star}cba + """Return the ket of this tensor, eg: + U_{abc} a^{\dagger}b^{\dagger}c^{\dagger} -> U^{cba\star}cba + Note this is different from Fermionic transposition """ axes = list(range(self.ndim))[::-1] data = self.data.permute(axes).conj() @@ -1190,8 +1207,13 @@ def __or__(self, other): """ return FermionTensorNetwork((self, other), virtual=True) + def __iter__(self): + sorted_sites = sorted(self.filled_sites) + for isite in sorted_sites: + yield self.fermion_space[isite][2] + def _reorder_from_tid(self, tid_map, inplace=False): - tn = self if inplace else self.copy() + tn = self if inplace else self.copy(full=True) tn.fermion_space._reorder_from_dict(tid_map) return tn @@ -1208,7 +1230,7 @@ def balance_bonds(self, inplace=False): ------- TensorNetwork """ - tn = self if inplace else self.copy() + tn = self if inplace else self.copy(full=True) for ix, tids in tn.ind_map.items(): if len(tids) != 2: @@ -1286,8 +1308,14 @@ def add_tensor_network(self, tn, virtual=False, check_collisions=True): if not tn.is_continuous(): raise ValueError("input tensor network is not contiguously ordered") - filled_sites = tn.filled_sites - sorted_sites = sorted(filled_sites) + sorted_tensors = [] + for tsr in tn: + tid = tsr.get_fermion_info()[0] + sorted_tensors.append([tid, tsr]) + # if inplace, fermion_owners need to be removed first to avoid conflicts + if virtual: + for tid, tsr in sorted_tensors: + tsr.remove_fermion_owner() if check_collisions: # add tensors individually if getattr(self, '_inner_inds', None) is None: @@ -1306,15 +1334,13 @@ def add_tensor_network(self, tn, virtual=False, check_collisions=True): self._inner_inds.update(other_inner_ix) # add tensors, reindexing if necessary - for site in sorted_sites: - tid, _, tsr = tn.fermion_space[site] + for tid, tsr in sorted_tensors: if clash_ix and any(i in reind for i in tsr.inds): tsr = tsr.reindex(reind, inplace=virtual) self.add_tensor(tsr, tid=tid, virtual=virtual) else: # directly add tensor/tag indexes - for site in sorted_sites: - tid, _, tsr = tn.fermion_space[site] + for tid, tsr in sorted_tensors: self.add_tensor(tsr, tid=tid, virtual=virtual) def add(self, t, virtual=False, check_collisions=True): @@ -1363,7 +1389,10 @@ def __ior__(self, tensor): """Inplace, virtual, addition of a Tensor or TensorNetwork to this network. It should not have any conflicting indices. """ - self.add(tensor, virtual=True) + if is_mergeable(self, tensor): + self.assemble(tensor) + else: + self.add(tensor, virtual=True) return self # ------------------------------- Methods ------------------------------- # @@ -1388,17 +1417,22 @@ def is_continuous(self): if len(filled_sites) ==0 : return True return (max(filled_sites) - min(filled_sites) + 1) == len(filled_sites) - def copy(self): - """ Tensors and underlying FermionSpace(all tensors in it) will - be copied + def copy(self, full=False): + """ For full copy, the tensors and underlying FermionSpace(all tensors in it) will + be copied. For partial copy, the tensors in this network must be continuously + placed and a new FermionSpace will be created to hold this continous sector. 
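+
+        For example (a sketch, with ``tn`` standing for any FermionTensorNetwork)::
+
+            tn_partial = tn.copy()           # new FermionSpace holding just this network's tensors
+            tn_full = tn.copy(full=True)     # also copies the whole underlying FermionSpace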
""" - return self.__class__(self, virtual=False) + if full: + return self.__class__(self, virtual=False) + else: + if not self.is_continuous(): + raise TypeError("Tensors not continuously placed in the network, \ + partial copy not allowed") + newtn = FermionTensorNetwork([]) + newtn.add_tensor_network(self) + newtn.view_like_(self) + return newtn - def simple_copy(self): - newtn = FermionTensorNetwork([]) - newtn.add_tensor_network(self) - newtn.view_like_(self) - return newtn def _pop_tensor(self, tid, remove_from_fs=True): """Remove a tensor from this network, returning said tensor. @@ -1423,7 +1457,7 @@ def _pop_tensor(self, tid, remove_from_fs=True): @property def H(self): - tn = self.copy() + tn = self.copy(full=True) fs = tn.fermion_space max_site = max(fs.sites) @@ -1452,7 +1486,6 @@ def __itruediv__(self, other): # ----------------- selecting and splitting the network ----------------- # - def __setitem__(self, tags, tensor): #TODO: FIXME """Set the single tensor uniquely associated with ``tags``. @@ -1463,8 +1496,8 @@ def __setitem__(self, tags, tensor): "existing tensor only - found {} with tag(s) '{}'." .format(len(tids), tags)) - if not isinstance(tensor, Tensor): - raise TypeError("Can only set value with a new 'Tensor'.") + if not isinstance(tensor, FermionTensor): + raise TypeError("Can only set value with a new 'FermionTensor'.") tid, = tids site = self.fermion_space.tensor_order[tid][1] @@ -1474,7 +1507,8 @@ def __setitem__(self, tags, tensor): def partition_tensors(self, tags, inplace=False, which='any'): """Split this TN into a list of tensors containing any or all of - ``tags`` and a ``TensorNetwork`` of the the rest. + ``tags`` and a ``FermionTensorNetwork`` of the the rest. + The tensors and FermionTensorNetwork remain in the same FermionSpace Parameters ---------- @@ -1489,8 +1523,8 @@ def partition_tensors(self, tags, inplace=False, which='any'): Returns ------- - (u_tn, t_ts) : (TensorNetwork, tuple of Tensors) - The untagged tensor network, and the sequence of tagged Tensors. + (u_tn, t_ts) : (FermionTensorNetwork, tuple of FermionTensors) + The untagged fermion tensor network, and the sequence of tagged Tensors. See Also -------- @@ -1503,7 +1537,7 @@ def partition_tensors(self, tags, inplace=False, which='any'): return None, self.tensor_map.values() # Copy untagged to new network, and pop tagged tensors from this - untagged_tn = self if inplace else self.copy() + untagged_tn = self if inplace else self.copy(full=True) tagged_ts = tuple(map(untagged_tn._pop_tensor_, sorted(tagged_tids))) return untagged_tn, tagged_ts @@ -1524,7 +1558,7 @@ def partition(self, tags, which='any', inplace=False): Returns ------- - untagged_tn, tagged_tn : (TensorNetwork, TensorNetwork) + untagged_tn, tagged_tn : (FermionTensorNetwork, FermionTensorNetwork) The untagged and tagged tensor networs. See Also @@ -1534,9 +1568,9 @@ def partition(self, tags, which='any', inplace=False): tagged_tids = self._get_tids_from_tags(tags, which=which) kws = {'check_collisions': False} - t1 = self if inplace else self.copy() + t1 = self if inplace else self.copy(full=True) t2s = [t1._pop_tensor_(tid) for tid in tagged_tids] - t2 = FermionTensorNetwork(t2s, **kws) + t2 = FermionTensorNetwork(t2s, virtual=True, **kws) t2.view_like_(self) return t1, t2 @@ -1559,7 +1593,7 @@ def contract_between(self, tags1, tags2, **contract_opts): tags2 : str or sequence of str Tags uniquely identifying the second tensor. contract_opts - Supplied to :func:`~quimb.tensor.tensor_core.tensor_contract`. 
+ Supplied to :func:`~quimb.tensor.fermion.tensor_contract`. """ tid1, = self._get_tids_from_tags(tags1, which='all') tid2, = self._get_tids_from_tags(tags2, which='all') @@ -1620,7 +1654,7 @@ def contract(self, tags=..., inplace=False, **opts): # this checks whether certain TN classes have a manually specified # contraction pattern (e.g. 1D along the line) if self._CONTRACT_STRUCTURED: - raise NotImplementedError("structured contraction not implemented") + raise NotImplementedError() # else just contract those tensors specified by tags. return self.contract_tags(tags, inplace=inplace, **opts) @@ -1639,9 +1673,13 @@ def _compress_between_tids( Tr = self.tensor_map[tid2] tensor_compress_bond(Tl, Tr, inplace=True, **compress_opts) - if equalize_norms: + if canonize_distance: raise NotImplementedError + if equalize_norms: + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) + def _canonize_between_tids( self, tid1, @@ -1654,7 +1692,8 @@ def _canonize_between_tids( tensor_canonize_bond(Tl, Tr, **canonize_opts) if equalize_norms: - raise NotImplementedError + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) def replace_section_with_svd(self, start, stop, eps, **replace_with_svd_opts): @@ -1675,6 +1714,5 @@ def cut_bond(self, bnd, left_ind, right_ind): def cut_between(self, left_tags, right_tags, left_ind, right_ind): raise NotImplementedError - def cut_iter(self, *inds): raise NotImplementedError diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 8198bbbb..7b5f6309 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -305,35 +305,35 @@ def compute_row_environments(self, dense=False, **compress_opts): # upwards pass row_envs['below', 0] = FermionTensorNetwork([]) first_row = self.row_tag(0) - row_envs['mid', 0] = env_bottom.select(first_row).simple_copy() + row_envs['mid', 0] = env_bottom.select(first_row).copy() if dense: env_bottom ^= first_row - row_envs['below', 1] = env_bottom.select(first_row).simple_copy() + row_envs['below', 1] = env_bottom.select(first_row).copy() for i in range(2, env_bottom.Lx): below_row = env_bottom.row_tag(i-1) - row_envs["mid", i-1] = env_bottom.select(below_row).simple_copy() + row_envs["mid", i-1] = env_bottom.select(below_row).copy() if dense: env_bottom ^= (self.row_tag(i - 2), self.row_tag(i - 1)) else: env_bottom.contract_boundary_from_bottom_( (i - 2, i - 1), **compress_opts) - row_envs['below', i] = env_bottom.select(first_row).simple_copy() + row_envs['below', i] = env_bottom.select(first_row).copy() last_row = env_bottom.row_tag(self.Lx-1) - row_envs['mid', self.Lx-1] = env_bottom.select(last_row).simple_copy() + row_envs['mid', self.Lx-1] = env_bottom.select(last_row).copy() # downwards pass row_envs['above', self.Lx - 1] = FermionTensorNetwork([]) last_row = self.row_tag(self.Lx - 1) if dense: env_top ^= last_row - row_envs['above', self.Lx - 2] = env_top.select(last_row).simple_copy() + row_envs['above', self.Lx - 2] = env_top.select(last_row).copy() for i in range(env_top.Lx - 3, -1, -1): if dense: env_top ^= (self.row_tag(i + 1), self.row_tag(i + 2)) else: env_top.contract_boundary_from_top_( (i + 1, i + 2), **compress_opts) - row_envs['above', i] = env_top.select(last_row).simple_copy() + row_envs['above', i] = env_top.select(last_row).copy() return row_envs @@ -347,36 +347,36 @@ def compute_col_environments(self, dense=False, **compress_opts): # upwards pass col_envs['left', 0] = FermionTensorNetwork([]) first_col = 
self.col_tag(0) - col_envs['mid', 0] = env_left.select(first_col).simple_copy() + col_envs['mid', 0] = env_left.select(first_col).copy() if dense: env_left ^= first_col - col_envs['left', 1] = env_left.select(first_col).simple_copy() + col_envs['left', 1] = env_left.select(first_col).copy() for i in range(2, env_left.Ly): left_col = env_left.col_tag(i-1) - col_envs["mid", i-1] = env_left.select(left_col).simple_copy() + col_envs["mid", i-1] = env_left.select(left_col).copy() if dense: env_left ^= (self.col_tag(i - 2), self.col_tag(i - 1)) else: env_left.contract_boundary_from_left_( (i - 2, i - 1), **compress_opts) - col_envs['left', i] = env_left.select(first_col).simple_copy() + col_envs['left', i] = env_left.select(first_col).copy() last_col = env_left.col_tag(self.Ly-1) - col_envs['mid', self.Ly-1] = env_left.select(last_col).simple_copy() + col_envs['mid', self.Ly-1] = env_left.select(last_col).copy() # downwards pass col_envs['right', self.Ly - 1] = FermionTensorNetwork([]) last_col = self.col_tag(self.Ly - 1) if dense: env_right ^= last_col - col_envs['right', self.Ly - 2] = env_right.select(last_col).simple_copy() + col_envs['right', self.Ly - 2] = env_right.select(last_col).copy() for i in range(env_right.Ly - 3, -1, -1): if dense: env_right ^= (self.col_tag(i + 1), self.col_tag(i + 2)) else: env_right.contract_boundary_from_right_( (i + 1, i + 2), **compress_opts) - col_envs['right', i] = env_right.select(last_col).simple_copy() + col_envs['right', i] = env_right.select(last_col).copy() return col_envs @@ -699,7 +699,6 @@ def gate( input_tids = psi._get_tids_from_inds(bnds, which='any') isite = [psi.tensor_map[itid].get_fermion_info()[1] for itid in input_tids] - #psi |= TG psi.fermion_space.add_tensor(TG, virtual=True) # get the sites that used to have the physical indices From d0c6f5d8c93f8590992ade859c97d6d007851743 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 5 Feb 2021 14:15:06 -0800 Subject: [PATCH 34/61] docstring added, remove fermion_ops module and move it to pyblock3 backend --- quimb/tensor/fermion_2d_tebd.py | 54 +++++--- quimb/tensor/fermion_ops.py | 123 ------------------ tests/test_fermion/test_fermion_2d.py | 3 +- tests/test_fermion/test_numerics.py | 3 +- .../{test_ops.py => test_operators.py} | 7 +- 5 files changed, 40 insertions(+), 150 deletions(-) delete mode 100644 quimb/tensor/fermion_ops.py rename tests/test_fermion/{test_ops.py => test_operators.py} (96%) diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py index 4424924b..8b9d050e 100644 --- a/quimb/tensor/fermion_2d_tebd.py +++ b/quimb/tensor/fermion_2d_tebd.py @@ -2,20 +2,34 @@ import random import collections from itertools import product -from pyblock3.algebra.core import SubTensor -from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor -from quimb.tensor.fermion_2d import FPEPS,FermionTensorNetwork2DVector -from quimb.tensor.fermion_ops import to_exponential, eye, hubbard -from pyblock3.algebra.symmetry import SZ, BondInfo -from quimb.tensor.tensor_2d_tebd import SimpleUpdate as _SimpleUpdate -from quimb.tensor.tensor_2d_tebd import conditioner -from quimb.utils import pairwise -from quimb.tensor.tensor_2d import (gen_long_range_path, - nearest_neighbors) - -SMALL_VAL = 1e-10 +from ..utils import pairwise +#from .fermion_ops import eye, hubbard +from .tensor_2d_tebd import SimpleUpdate as _SimpleUpdate +from .tensor_2d_tebd import conditioner +from .tensor_2d import (gen_long_range_path, + nearest_neighbors) +from pyblock3.algebra.fermion_operators 
import eye, hubbard + +INVERSE_CUTOFF = 1e-10 def Hubbard2D(t, u, Lx, Ly): + """Create a LocalHam2D object for 2D Hubbard Model + + Parameters + ---------- + t : int or float + The hopping parameter + u : int or float + Onsite columb repulsion + Lx: int + Size in x direction + Ly: int + Size in y direction + + Returns + ------- + a LocalHam2D object + """ ham = dict() count_neighbour = lambda i,j: (i>0) + (i0) + (jSMALL_VAL] += self.gauge_smudge + mult_val.data[abs(mult_val.data)>INVERSE_CUTOFF] += self.gauge_smudge Tij.multiply_index_diagonal_( ind=bond_ind, x=mult_val, location=location) @@ -325,7 +339,7 @@ def env_neighbours(i, j): raise KeyError("gauge not found") bnd = self._psi.bond(site, neighbour) mult_val = Tsval.copy() - non_zero_ind = abs(mult_val.data)>SMALL_VAL + non_zero_ind = abs(mult_val.data)>INVERSE_CUTOFF mult_val.data[non_zero_ind] = (mult_val.data[non_zero_ind] + self.gauge_smudge) ** -1 Tij.multiply_index_diagonal_( ind=bnd, x=mult_val, location=location) @@ -333,8 +347,8 @@ def env_neighbours(i, j): def get_state(self, absorb_gauges=True): """Return the state, with the diagonal bond gauges either absorbed equally into the tensors on either side of them - (``absorb_gauges=True``, the default), or left lazily represented in - the tensor network with hyperedges (``absorb_gauges=False``). + (``absorb_gauges=True``, the default), lazy representation with + hyperedges not implemented """ psi = self._psi.copy() diff --git a/quimb/tensor/fermion_ops.py b/quimb/tensor/fermion_ops.py deleted file mode 100644 index e2268116..00000000 --- a/quimb/tensor/fermion_ops.py +++ /dev/null @@ -1,123 +0,0 @@ -import numpy as np -from itertools import product -from pyblock3.algebra.core import SubTensor -from pyblock3.algebra.fermion import SparseFermionTensor, FlatFermionTensor, _pack_flat_tensor, _unpack_flat_tensor -from pyblock3.algebra.symmetry import SZ, BondInfo -from .fermion_2d import FPEPS,FermionTensorNetwork2DVector - -def to_exponential(tsr, x): - ndim = tsr.ndim - if tsr.parity == 1: - raise ValueError("expontial of odd parity tensor not defined") - if np.mod(ndim, 2) !=0: - raise ValueError("dimension of the tensor must be even (%i)"%ndim) - ax = ndim //2 - data = [] - udata, sdata, vdata = [],[],[] - uq,sq,vq= [],[],[] - ushapes, vshapes, sshapes = [],[],[] - sz_labels = ((SZ(0),SZ(0)), (SZ(1), SZ(1))) - if ndim == 2: - parity_axes = None - else: - parity_axes = list(range(ax)) - - for szlab in sz_labels: - data, row_map, col_map = _pack_flat_tensor(tsr, szlab, ax, parity_axes) - el, ev = np.linalg.eig(data) - s = np.diag(np.exp(el*x)) - _unpack_flat_tensor(ev, row_map, 0, udata, uq, ushapes, parity_axes) - _unpack_flat_tensor(ev.conj().T, col_map, 1, vdata, vq, vshapes) - sq.append([SZ.to_flat(iq) for iq in szlab]) - sshapes.append(s.shape) - sdata.append(s.ravel()) - - sq = np.asarray(sq, dtype=np.uint32) - sshapes = np.asarray(sshapes, dtype=np.uint32) - sdata = np.concatenate(sdata) - s = FlatFermionTensor(sq, sshapes, sdata) - - uq = np.asarray(uq, dtype=np.uint32) - ushapes = np.asarray(ushapes, dtype=np.uint32) - udata = np.concatenate(udata) - - vq = np.asarray(vq, dtype=np.uint32) - vshapes = np.asarray(vshapes, dtype=np.uint32) - vdata = np.concatenate(vdata) - u = FlatFermionTensor(uq, ushapes, udata) - v = FlatFermionTensor(vq, vshapes, vdata) - - out = np.tensordot(u, s, axes=((-1,),(0,))) - out = np.tensordot(out, v, axes=((-1,),(0,))) - return out - -eye = FlatFermionTensor.eye - -def gen_h1(h=1.): - blocks= [] - for i, j in product(range(2), repeat=2): - 
qlab = (SZ(i), SZ(j), SZ(1-i), SZ(1-j)) - qlst = [q.n for q in qlab] - iblk = np.zeros([2,2,2,2]) - blocks.append(SubTensor(reduced=np.zeros([2,2,2,2]), q_labels=(SZ(i), SZ(j), SZ(i), SZ(j)))) - if (i+j)==1: - iblk[0,0,0,0] = iblk[i,j,j,i] = h - iblk[1,1,1,1] = iblk[j,i,i,j] = -h - else: - if i == 0: - iblk[0,1,0,1] = iblk[1,0,0,1] = h - iblk[1,0,1,0] = iblk[0,1,1,0] = -h - else: - iblk[0,1,0,1] = iblk[0,1,1,0] = -h - iblk[1,0,1,0] = iblk[1,0,0,1] = h - blocks.append(SubTensor(reduced=iblk, q_labels=qlab)) - hop = SparseFermionTensor(blocks=blocks).to_flat() - return hop - -hopping = lambda t=1.0: gen_h1(-t) - -def onsite_u(u=1): - umat0 = np.zeros([2,2]) - umat0[1,1] = u - umat1 = np.zeros([2,2]) - blocks = [SubTensor(reduced=umat0, q_labels=(SZ(0), SZ(0))), - SubTensor(reduced=umat1, q_labels=(SZ(1), SZ(1)))] - umat = SparseFermionTensor(blocks=blocks).to_flat() - return umat - -def hubbard(t, u, fac=None): - if fac is None: - fac = (1, 1) - faca, facb = fac - ham = hopping(t).to_sparse() - for iblk in ham: - qin, qout = iblk.q_labels[:2], iblk.q_labels[2:] - if qin != qout: continue - in_pair = [iq.n for iq in qin] - if in_pair == [0,0]: - iblk[1,0,1,0] += faca * u - iblk[0,1,0,1] += facb * u - iblk[1,1,1,1] += (faca + facb) * u - elif in_pair == [0,1]: - iblk[1,:,1,:] += faca * u * np.eye(2) - elif in_pair == [1,0]: - iblk[:,1,:,1] += facb * u * np.eye(2) - return ham.to_flat() - -def count_n(): - nmat0 = np.zeros([2,2]) - nmat0[1,1] = 2 - nmat1 = np.eye(2) - blocks = [SubTensor(reduced=nmat0, q_labels=(SZ(0), SZ(0))), - SubTensor(reduced=nmat1, q_labels=(SZ(1), SZ(1)))] - nmat = SparseFermionTensor(blocks=blocks).to_flat() - return nmat - -def measure_sz(): - zmat0 = np.zeros([2,2]) - zmat1 = np.eye(2) * .5 - zmat1[1,1]= -.5 - blocks = [SubTensor(reduced=zmat0, q_labels=(SZ(0), SZ(0))), - SubTensor(reduced=zmat1, q_labels=(SZ(1), SZ(1)))] - smat = SparseFermionTensor(blocks=blocks).to_flat() - return smat diff --git a/tests/test_fermion/test_fermion_2d.py b/tests/test_fermion/test_fermion_2d.py index 60bfa6fc..551d3284 100644 --- a/tests/test_fermion/test_fermion_2d.py +++ b/tests/test_fermion/test_fermion_2d.py @@ -4,8 +4,7 @@ from quimb.tensor.fermion_2d import FPEPS from pyblock3.algebra.fermion import SparseFermionTensor from pyblock3.algebra.symmetry import SZ, BondInfo - -from quimb.tensor import fermion_ops as ops +from pyblock3.algebra import fermion_operators as ops diff --git a/tests/test_fermion/test_numerics.py b/tests/test_fermion/test_numerics.py index f94b92c0..84c3c898 100644 --- a/tests/test_fermion/test_numerics.py +++ b/tests/test_fermion/test_numerics.py @@ -4,8 +4,7 @@ from quimb.tensor.fermion import ( FermionTensor, FermionTensorNetwork, tensor_contract) -from quimb.tensor.fermion_2d import ( - FPEPS, ) +from quimb.tensor.fermion_2d import FPEPS from pyblock3.algebra.symmetry import (SZ, BondInfo) from pyblock3.algebra.fermion import SparseFermionTensor diff --git a/tests/test_fermion/test_ops.py b/tests/test_fermion/test_operators.py similarity index 96% rename from tests/test_fermion/test_ops.py rename to tests/test_fermion/test_operators.py index 4d3854f1..874d97cd 100644 --- a/tests/test_fermion/test_ops.py +++ b/tests/test_fermion/test_operators.py @@ -2,7 +2,8 @@ import numpy as np import itertools from quimb.tensor.fermion_2d import gen_mf_peps, FPEPS -from quimb.tensor import fermion_ops as ops +#from quimb.tensor import fermion_ops as ops +from pyblock3.algebra import fermion_operators as ops from pyblock3.algebra.symmetry import SZ from 
pyblock3.algebra.core import SubTensor from pyblock3.algebra.fermion import SparseFermionTensor @@ -94,7 +95,7 @@ def test_exponential_u(self): U = 4. tau = 0.02 uop = ops.onsite_u(U) - uop_exp = ops.to_exponential(uop, -tau) + uop_exp = uop.to_exponential(-tau) terms = {coo: uop_exp for coo in itertools.product(range(Lx), range(Ly))} result = psi.compute_local_expectation(terms, normalized=False, return_all=True) for ix, iy in itertools.product(range(Lx), range(Ly)): @@ -105,7 +106,7 @@ def test_exponential_hop(self): t = 3 tau = 0.1 hop = ops.hopping(t) - hop_exp = ops.to_exponential(hop, -tau) + hop_exp = hop.to_exponential(-tau) blocks=[] states = np.zeros([2,2]) states[1,0] = states[0,1] = .5 From 4294d736a66c314e9c2aaf92a84035763f287119 Mon Sep 17 00:00:00 2001 From: yangcal Date: Mon, 8 Feb 2021 12:04:05 -0800 Subject: [PATCH 35/61] add chemical potential support in Hubbard, cleanup --- quimb/tensor/fermion_2d.py | 5 ++--- quimb/tensor/fermion_2d_tebd.py | 16 ++++++++-------- tests/test_fermion/test_numerics.py | 28 +++------------------------- tests/test_fermion/test_operators.py | 10 ++++++---- 4 files changed, 19 insertions(+), 40 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 7b5f6309..8dbaf616 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -25,7 +25,6 @@ import numpy as np import functools from operator import add -from pyblock3.algebra.fermion import FlatFermionTensor INVERSE_CUTOFF = 1e-10 @@ -124,7 +123,7 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] snew = np.zeros_like(s.data) snew[idx] = 1/s.data[idx] - snew = FlatFermionTensor(s.q_labels, s.shapes, snew, idxs=s.idxs) + snew = s.__class__(s.q_labels, s.shapes, snew, idxs=s.idxs) t = inner_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) @@ -256,7 +255,7 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] snew = np.zeros_like(s.data) snew[idx] = 1/s.data[idx] - snew = FlatFermionTensor(s.q_labels, s.shapes, snew, idxs=s.idxs) + snew = s.__class__(s.q_labels, s.shapes, snew, idxs=s.idxs) t = new_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py index 8b9d050e..35b64861 100644 --- a/quimb/tensor/fermion_2d_tebd.py +++ b/quimb/tensor/fermion_2d_tebd.py @@ -3,28 +3,28 @@ import collections from itertools import product from ..utils import pairwise -#from .fermion_ops import eye, hubbard from .tensor_2d_tebd import SimpleUpdate as _SimpleUpdate from .tensor_2d_tebd import conditioner -from .tensor_2d import (gen_long_range_path, - nearest_neighbors) +from .tensor_2d import gen_long_range_path, nearest_neighbors from pyblock3.algebra.fermion_operators import eye, hubbard INVERSE_CUTOFF = 1e-10 -def Hubbard2D(t, u, Lx, Ly): +def Hubbard2D(t, u, Lx, Ly, mu=0.): """Create a LocalHam2D object for 2D Hubbard Model Parameters ---------- - t : int or float + t : scalar The hopping parameter - u : int or float + u : scalar Onsite columb repulsion Lx: int Size in x direction Ly: int Size in y direction + mu: scalar, optional + Chemical potential Returns ------- @@ -37,12 +37,12 @@ def Hubbard2D(t, u, Lx, Ly): if i+1 != Lx: where = ((i,j), (i+1,j)) count_b = count_neighbour(i+1,j) - uop = hubbard(t,u, (1./count_ij, 1./count_b)) + uop = hubbard(t,u, mu, (1./count_ij, 1./count_b)) ham[where] = uop if j+1 != Ly: where = ((i,j), 
(i,j+1)) count_b = count_neighbour(i,j+1) - uop = hubbard(t,u, (1./count_ij, 1./count_b)) + uop = hubbard(t,u, mu, (1./count_ij, 1./count_b)) ham[where] = uop return LocalHam2D(Lx, Ly, ham) diff --git a/tests/test_fermion/test_numerics.py b/tests/test_fermion/test_numerics.py index 84c3c898..c61e5c49 100644 --- a/tests/test_fermion/test_numerics.py +++ b/tests/test_fermion/test_numerics.py @@ -9,17 +9,6 @@ from pyblock3.algebra.symmetry import (SZ, BondInfo) from pyblock3.algebra.fermion import SparseFermionTensor -def get_err(A, B): - err = [] - nblk = A.shapes.shape[0] - for i in range(nblk): - dlt = np.sum(abs(A.q_labels[i] - B.q_labels), axis=1) - j = np.where(dlt==0)[0][0] - ist, ied = A.idxs[i], A.idxs[i+1] - jst, jed = B.idxs[j], B.idxs[j+1] - err.append(max(abs(A.data[ist:ied]-B.data[jst:jed]))) - return max(err) - np.random.seed(3) bond_1 = BondInfo({SZ(0):3, SZ(1): 2}) bond_2 = BondInfo({SZ(0):5, SZ(1): 5}) @@ -51,7 +40,7 @@ def test_backend(self): tsr_egbc = tensor_contract(tsr_abc, tsr_ega, output_inds=("e","g","b", "c")) egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) - err = get_err(tsr_egbc.data, egbc) + err = (egbc - tsr_egbc.data).norm() assert err < 1e-10 def test_contract_between(self): @@ -60,7 +49,7 @@ def test_contract_between(self): tsr_egbc = tn1["abc"].transpose("e","g","b","c") egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) - err = get_err(tsr_egbc.data, egbc) + err = (egbc - tsr_egbc.data).norm() assert err < 1e-10 def test_contract_all(self): @@ -79,20 +68,9 @@ def test_contract_ind(self): out = tn1["deg"].transpose("e","g","b","c") egbc = np.tensordot(deg, bcd, axes=[(0,),(2)]) - err = get_err(out.data, egbc) + err = (egbc - out.data).norm() assert err < 1e-10 -class TestCompress: - def test_backend(self): - pass - - def test_compress_between(self): - pass - -class TestCanonize: - def test_backend(self): - pass - class TestBalance: def test_balance_bonds(self): Lx = Ly = 3 diff --git a/tests/test_fermion/test_operators.py b/tests/test_fermion/test_operators.py index 874d97cd..5de3bc87 100644 --- a/tests/test_fermion/test_operators.py +++ b/tests/test_fermion/test_operators.py @@ -2,7 +2,6 @@ import numpy as np import itertools from quimb.tensor.fermion_2d import gen_mf_peps, FPEPS -#from quimb.tensor import fermion_ops as ops from pyblock3.algebra import fermion_operators as ops from pyblock3.algebra.symmetry import SZ from pyblock3.algebra.core import SubTensor @@ -65,13 +64,16 @@ def test_hubbard(self): psi = FPEPS.rand(Lx, Ly, 2) t = 2. U = 6. 
+ mu = 0.2 hop = ops.hopping(t) uop = ops.onsite_u(U) - full_terms = {(ix, iy): uop for ix, iy in itertools.product(range(Lx), range(Ly))} + nop = ops.count_n() + full_terms = {(ix, iy): uop + mu*nop for ix, iy in itertools.product(range(Lx), range(Ly))} hterms = {coos: hop for coos in psi.gen_horizontal_bond_coos()} vterms = {coos: hop for coos in psi.gen_vertical_bond_coos()} full_terms.update(hterms) full_terms.update(vterms) + mu_terms = {(ix, iy): nop for ix, iy in itertools.product(range(Lx), range(Ly))} ene = psi.compute_local_expectation(full_terms, max_bond=12) ham = dict() @@ -81,12 +83,12 @@ def test_hubbard(self): if i+1 != Lx: where = ((i,j), (i+1,j)) count_b = count_neighbour(i+1,j) - uop = ops.hubbard(t,U, (1./count_ij, 1./count_b)) + uop = ops.hubbard(t,U, mu, (1./count_ij, 1./count_b)) ham[where] = uop if j+1 != Ly: where = ((i,j), (i,j+1)) count_b = count_neighbour(i,j+1) - uop = ops.hubbard(t,U, (1./count_ij, 1./count_b)) + uop = ops.hubbard(t,U, mu, (1./count_ij, 1./count_b)) ham[where] = uop ene1 = psi.compute_local_expectation(ham, max_bond=12) assert ene == pytest.approx(ene1, rel=1e-2) From d96bc34a0d55af01884809f2ed873fd6605e5434 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 24 Feb 2021 16:04:35 -0800 Subject: [PATCH 36/61] API for QPN modified --- quimb/tensor/fermion.py | 34 +++++----- quimb/tensor/fermion_2d.py | 90 ++++++++++----------------- tests/test_fermion/test_fermion_2d.py | 29 ++++----- tests/test_fermion/test_numerics.py | 31 ++++----- tests/test_fermion/test_operators.py | 64 +++++++++---------- 5 files changed, 109 insertions(+), 139 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 31805e7b..2f1939a4 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -13,6 +13,7 @@ from .tensor_core import tensor_contract as _tensor_contract from ..utils import oset, valmap from .array_ops import asarray, ndim +from pyblock3.algebra.symmetry import QPN def _contract_connected(T1, T2, output_inds=None): """Fermionic contraction of two tensors that are adjacent to each other. @@ -218,7 +219,8 @@ def tensor_split( rtags=None, stags=None, bond_ind=None, - right_inds=None + right_inds=None, + qpn_info=None ): """Decompose this Fermionic tensor into two fermionic tensors. 
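For context, the new ``qpn_info`` argument pins the net quantum number carried by each tensor produced from a split, so the two outputs remain well-defined symmetry blocks. A minimal usage sketch (hypothetical, not part of the patch; it assumes the patched quimb.tensor.fermion module and two adjacent FermionTensors ``tl`` and ``tr`` sharing one bond) mirroring how ``_compress_connected`` derives it in the hunks below:

    from quimb.tensor.fermion import tensor_contract

    # contract the pair into a single blob, then split it back into two tensors
    blob = tensor_contract(tl, tr)
    # each output keeps the net charge (``data.dq``) of the tensor it replaces
    qpn_info = (tl.data.dq, tr.data.dq)
    left_inds = [ix for ix in tl.inds if ix not in tr.inds]
    right_inds = [ix for ix in tr.inds if ix not in tl.inds]
    # site-ordering caveats handled inside _compress_connected are ignored here
    l, r = blob.split(left_inds=left_inds, right_inds=right_inds,
                      get='tensors', absorb='both', qpn_info=qpn_info)
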
@@ -312,7 +314,7 @@ def tensor_split( _right_inds =[T.inds.index(i) for i in right_inds] if method == "svd": - left, s, right = T.data.tensor_svd(_left_inds, right_idx=_right_inds, **opts) + left, s, right = T.data.tensor_svd(_left_inds, right_idx=_right_inds, qpn_info=qpn_info, **opts) else: raise NotImplementedError @@ -370,16 +372,17 @@ def _compress_connected(Tl, Tr, absorb='both', **compress_opts): left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] out = _contract_connected(Tl, Tr) + qpn_info = (Tl.data.dq, Tr.data.dq) if Tl.get_fermion_info()[1] < Tr.get_fermion_info()[1]: if absorb == "left": absorb = "right" elif absorb == "right": absorb = "left" r, l = out.split(left_inds=right_inds, right_inds=left_inds, - absorb=absorb, get="tensors", **compress_opts) + absorb=absorb, get="tensors", qpn_info=qpn_info, **compress_opts) else: l, r = out.split(left_inds=left_inds, right_inds=right_inds, - absorb=absorb, get="tensors", **compress_opts) + absorb=absorb, get="tensors", qpn_info=qpn_info, **compress_opts) return l, r def tensor_compress_bond( @@ -443,10 +446,12 @@ def _canonize_connected(T1, T2, absorb='right', **split_opts): raise ValueError("The tensors specified don't share an bond.") if T1.get_fermion_info()[1] < T2.get_fermion_info()[1]: - tRfact, new_T1 = T1.split(shared_ix, get="tensors", **split_opts) + qpn_info = (T1.data.dq.__class__(0), T1.data.dq) + tRfact, new_T1 = T1.split(shared_ix, get="tensors", qpn_info=qpn_info, **split_opts) new_T2 = _contract_connected(T2, tRfact) else: - new_T1, tRfact = T1.split(left_env_ix, get='tensors', **split_opts) + qpn_info = (T1.data.dq, T1.data.dq.__class__(0)) + new_T1, tRfact = T1.split(left_env_ix, get='tensors', qpn_info=qpn_info, **split_opts) new_T2 = _contract_connected(tRfact, T2) if absorb == "left": @@ -516,8 +521,8 @@ def tensor_balance_bond(t1, t2, smudge=1e-6): sblk1.append(SubTensor(reduced=np.diag(s**-0.25), q_labels=iblk1.q_labels)) sblk2.append(SubTensor(reduced=np.diag(s**0.25), q_labels=iblk2.q_labels)) - s1 = SparseFermionTensor(blocks=sblk1).to_flat() - s2 = SparseFermionTensor(blocks=sblk2).to_flat() + s1 = SparseFermionTensor(blocks=sblk1, pattern="+-").to_flat() + s2 = SparseFermionTensor(blocks=sblk2, pattern="+-").to_flat() t1.multiply_index_diagonal_(ix, s1, location="back") t2.multiply_index_diagonal_(ix, s2, location="front") @@ -881,12 +886,10 @@ def H(self): new_fs = FermionSpace() for tid, (tsr, site) in self.tensor_order.items(): T = tsr.copy() - reverse_order = list(range(tsr.ndim))[::-1] - new_data = T.data.permute(reverse_order).conj() + new_data = T.data.dagger new_inds = T.inds[::-1] T.modify(data=new_data, inds=new_inds) new_fs.add_tensor(T, tid, max_site-site, virtual=True) - return new_fs @@ -957,8 +960,8 @@ def ind_size(self, dim_or_ind): raise ValueError("%s indice not found in the tensor"%dim_or_ind) dim_or_ind = self.inds.index(dim_or_ind) - from pyblock3.algebra.symmetry import SZ, BondInfo - sz = [SZ.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] + from pyblock3.algebra.symmetry import QPN, BondInfo + sz = [QPN.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] sp = self.data.shapes[:,dim_or_ind] bond_dict = dict(zip(sz, sp)) return BondInfo(bond_dict) @@ -1083,8 +1086,7 @@ def H(self): U_{abc} a^{\dagger}b^{\dagger}c^{\dagger} -> U^{cba\star}cba Note this is different from Fermionic transposition """ - axes = list(range(self.ndim))[::-1] - data = self.data.permute(axes).conj() + data = self.data.dagger 
inds = self.inds[::-1] tsr = self.copy() tsr.modify(data=data, inds=inds) @@ -1463,7 +1465,7 @@ def H(self): for tid, (tsr, site) in fs.tensor_order.items(): reverse_order = list(range(tsr.ndim))[::-1] - new_data = tsr.data.permute(reverse_order).conj() + new_data = tsr.data.dagger new_inds = tsr.inds[::-1] tsr.modify(data=new_data, inds=new_inds) fs.tensor_order.update({tid: (tsr, max_site-site)}) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 8dbaf616..6cac73cb 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -39,11 +39,13 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, # tensors we are going to contract in the blob, reindex some to attach gate contract_ts = [] fermion_info = [] + qpn_infos = [] for t, coo in zip(original_ts, string): neighb_inds.append(tuple(ix for ix in t.inds if ix not in bonds_along)) contract_ts.append(t.reindex_(reindex_map) if coo in where else t) fermion_info.append(t.get_fermion_info()) + qpn_infos.append(t.data.dq) blob = tensor_contract(*contract_ts, TG, inplace=True) regauged = [] @@ -64,10 +66,10 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, bix = bonds_along[i] # split the blob! - + qpn_info = [blob.data.dq - qpn_infos[i], qpn_infos[i]] lix = tuple(oset(blob.inds)-oset(lix)) blob, *maybe_svals, inner_ts[i] = blob.split( - left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + left_inds=lix, get='tensors', bond_ind=bix, qpn_info=qpn_info, **compress_opts) # if singular values are returned (``absorb=None``) check if we should # return them via ``info``, e.g. for ``SimpleUpdate` @@ -98,8 +100,9 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, bix = bonds_along[j - 1] # split the blob! + qpn_info = [qpn_infos[j], blob.data.dq - qpn_infos[j]] inner_ts[j], *maybe_svals, blob= blob.split( - left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + left_inds=lix, get='tensors', bond_ind=bix, qpn_info=qpn_info, **compress_opts) # if singular values are returned (``absorb=None``) check if we should # return them via ``info``, e.g. 
for ``SimpleUpdate` @@ -123,7 +126,7 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] snew = np.zeros_like(s.data) snew[idx] = 1/s.data[idx] - snew = s.__class__(s.q_labels, s.shapes, snew, idxs=s.idxs) + snew = s.__class__(s.q_labels, s.shapes, snew, pattern="+-", idxs=s.idxs) t = inner_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) @@ -155,8 +158,9 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, fs = TG.fermion_owner[1]() tid_lst = [] for coo, rix, t in zip(string, inds_to_reduce, original_ts): + qpn_info = (t.data.dq, t.data.dq.__class__(0)) tq, tr = t.split(left_inds=None, right_inds=rix, - method='svd', get='tensors', absorb="right") + method='svd', get='tensors', absorb="right", qpn_info=qpn_info) fermion_info.append(t.get_fermion_info()) outer_ts.append(tq) inner_ts.append(tr.reindex_(reindex_map) if coo in where else tr) @@ -255,7 +259,7 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] snew = np.zeros_like(s.data) snew[idx] = 1/s.data[idx] - snew = s.__class__(s.q_labels, s.shapes, snew, idxs=s.idxs) + snew = s.__class__(s.q_labels, s.shapes, snew, pattern="+-", idxs=s.idxs) t = new_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) @@ -950,8 +954,8 @@ def __init__(self, arrays, *, shape='urdlp', tags=None, super().__init__(tensors, check_collisions=False, **tn_opts) @classmethod - def rand(cls, Lx, Ly, bond_dim, phys_dim=2, - dtype=float, seed=None, parity=None, + def rand(cls, Lx, Ly, bond_dim, qpn=None, phys_dim=1, + dtype=float, seed=None, shape="urdlp", qpn_map=None, **peps_opts): """Create a random (un-normalized) PEPS. @@ -980,64 +984,36 @@ def rand(cls, Lx, Ly, bond_dim, phys_dim=2, """ if seed is not None: np.random.seed(seed) - - arrays = [[None for _ in range(Ly)] for _ in range(Lx)] - - from pyblock3.algebra.fermion import SparseFermionTensor - from pyblock3.algebra.symmetry import SZ, BondInfo - - if isinstance(parity, np.ndarray): - if not parity.shape != (Lx, Ly): - raise ValueError("parity array shape not matching (Lx, Ly)") - elif isinstance(parity, int): - parity = np.ones((Lx, Ly), dtype=int) * (parity % 2) - elif parity is None: - parity = np.random.randint(0,2,Lx*Ly).reshape(Lx, Ly) + if qpn is None: qpn = (Lx*Ly, Lx*Ly%2) + from pyblock3.algebra import fermion_gen + if qpn_map is None: + distribution = peps_opts.pop("distribution", "even") + qpn_map = fermion_gen._gen_2d_qpn_map(Lx, Ly, qpn, distribution) + + if Lx >= Ly: + arrays = fermion_gen._qpn_map_to_col_skeleton(qpn_map, phys_dim, bond_dim, shape) else: - raise TypeError("parity type not recoginized") - - vir_info = BondInfo({SZ(0): bond_dim, SZ(1): bond_dim}) - phy_info = BondInfo({SZ(0): phys_dim, SZ(1): phys_dim}) - - for i, j in product(range(Lx), range(Ly)): - - shape = [] - if i != Lx - 1: # bond up - shape.append(vir_info) - if j != Ly - 1: # bond right - shape.append(vir_info) - if i != 0: # bond down - shape.append(vir_info) - if j != 0: # bond left - shape.append(vir_info) - - shape.append(phy_info) - dq = SZ(parity[i][j]) - - tsr = SparseFermionTensor.random(shape, dq=dq, dtype=dtype).to_flat() - tsr.data /= np.linalg.norm(tsr.data, 2) **(1.5 / tsr.ndim) - arrays[i][j] = tsr + arrays = fermion_gen._qpn_map_to_row_skeleton(qpn_map, phys_dim, bond_dim, shape) return cls(arrays, **peps_opts) -def _gen_site_wfn_tsr(state, ndim=2, ax=0): +def _gen_site_wfn_tsr(state, pattern=None, ndim=2, ax=0): from 
pyblock3.algebra.core import SubTensor from pyblock3.algebra.fermion import SparseFermionTensor - from pyblock3.algebra.symmetry import SZ - state_map = {0:(0,0), 1:(1,0), 2:(1,1), 3:(0,1)} + from pyblock3.algebra.symmetry import QPN + state_map = {0:QPN(0,0), 1:QPN(1,1), 2:QPN(1,-1), 3:QPN(2,0)} if state not in state_map: raise KeyError("requested state not recoginized") - q_lab, ind = state_map[state] - q_label = [SZ(0),] * ax + [SZ(q_lab),] + [SZ(0),] *(ndim-ax-1) - shape = [1,] * ax + [2,] + [1,] *(ndim-ax-1) - dat = np.zeros([2]) - dat.put(ind, 1) - dat = dat.reshape(shape) + q_label = [QPN(0),] * ax + [state_map[state]] + [QPN(0),] *(ndim-ax-1) + shape = [1,] * ndim + dat = np.ones(shape) blocks = [SubTensor(reduced=dat, q_labels=q_label)] - smat = SparseFermionTensor(blocks=blocks).to_flat() + smat = SparseFermionTensor(blocks=blocks, pattern=pattern).to_flat() return smat def gen_mf_peps(state_array, shape='urdlp', **kwargs): + pattern_map = {"d": "+", "l":"+", "p":"+", + "u": "-", "r":"-"} Lx, Ly = state_array.shape arr = state_array.astype("int") cache = dict() @@ -1052,12 +1028,12 @@ def _gen_ij(i, j): array_order = array_order.replace('d', '') if j == 0: array_order = array_order.replace('l', '') - + pattern = "".join([pattern_map[i] for i in array_order]) ndim = len(array_order) ax = array_order.index('p') - key = (state, ndim, ax) + key = (state, ndim, ax, pattern) if key not in cache: - cache[key] = _gen_site_wfn_tsr(state, ndim, ax).copy() + cache[key] = _gen_site_wfn_tsr(state, pattern, ndim, ax).copy() return cache[key] tsr_array = [[_gen_ij(i,j) for j in range(Ly)] for i in range(Lx)] diff --git a/tests/test_fermion/test_fermion_2d.py b/tests/test_fermion/test_fermion_2d.py index 551d3284..2ca34e57 100644 --- a/tests/test_fermion/test_fermion_2d.py +++ b/tests/test_fermion/test_fermion_2d.py @@ -3,11 +3,7 @@ import itertools from quimb.tensor.fermion_2d import FPEPS from pyblock3.algebra.fermion import SparseFermionTensor -from pyblock3.algebra.symmetry import SZ, BondInfo -from pyblock3.algebra import fermion_operators as ops - - - +from pyblock3.algebra.symmetry import QPN, BondInfo class TestPEPSConstruct: @pytest.mark.parametrize('where', [ @@ -16,8 +12,8 @@ class TestPEPSConstruct: ]) @pytest.mark.parametrize('contract', [False, True]) def test_gate_2d_single_site(self, where, contract): - bond = BondInfo({SZ(0):2, SZ(1): 2}) - G = SparseFermionTensor.random((bond, bond)).to_flat() + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) + G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() Lx = 3 Ly = 3 psi = FPEPS.rand(Lx, Ly, 2, seed=42, tags='KET') @@ -32,8 +28,8 @@ def test_gate_2d_single_site(self, where, contract): [(1, 1), (2, 1)], [(2, 1), (2, 2)] ]) def test_gate_2d_two_site(self, where, contract): - bond = BondInfo({SZ(0):2, SZ(1): 2}) - G = SparseFermionTensor.random((bond, bond,bond,bond)).to_flat() + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) + G = SparseFermionTensor.random((bond, bond,bond,bond), pattern="++--").to_flat() Lx = 3 Ly = 3 psi = FPEPS.rand(Lx, Ly, 2, seed=42, tags='KET') @@ -44,18 +40,19 @@ def test_gate_2d_two_site(self, where, contract): assert tn ^ all == pytest.approx(xe) class Test2DContract: + def test_contract_2d_one_layer_boundary(self): psi = FPEPS.rand(4, 4, 2, seed=42, tags='KET') norm = psi.make_norm() xe = norm.contract(all, optimize='auto-hq') - xt = norm.contract_boundary(max_bond=9) + xt = norm.contract_boundary(max_bond=6) assert xt == pytest.approx(xe, rel=1e-2) def 
test_contract_2d_two_layer_boundary(self): psi = FPEPS.rand(4, 4, 2, seed=42, tags='KET') norm = psi.make_norm() xe = norm.contract(all, optimize='auto-hq') - xt = norm.contract_boundary(max_bond=9, layer_tags=['KET', 'BRA']) + xt = norm.contract_boundary(max_bond=6, layer_tags=['KET', 'BRA']) assert xt == pytest.approx(xe, rel=1e-2) @pytest.mark.parametrize("two_layer", [False, True]) @@ -79,6 +76,7 @@ def test_compute_row_envs(self, two_layer): x = norm_i.contract(all) assert x == pytest.approx(ex, rel=1e-2) + @pytest.mark.parametrize("two_layer", [False, True]) def test_compute_col_envs(self, two_layer): psi = FPEPS.rand(2, 4, 2, seed=42, tags='KET') @@ -100,6 +98,7 @@ def test_compute_col_envs(self, two_layer): x = norm_i.contract(all) assert x == pytest.approx(ex, rel=1e-2) + def test_normalize(self): psi = FPEPS.rand(3, 3, 2, seed=42) norm = psi.make_norm().contract(all) @@ -108,10 +107,12 @@ def test_normalize(self): norm = psi.make_norm().contract(all) assert norm == pytest.approx(1.0, rel=1e-2) + + def test_compute_local_expectation_one_sites(self): peps = FPEPS.rand(4, 3, 2, seed=42) coos = list(itertools.product([0, 2, 3], [0, 1, 2])) - bond = BondInfo({SZ(0):2, SZ(1): 2}) + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) terms = {coo: SparseFermionTensor.random((bond, bond)).to_flat() for coo in coos} expecs = peps.compute_local_expectation( @@ -135,8 +136,8 @@ def test_compute_local_expectation_one_sites(self): def test_compute_local_expectation_two_sites(self): normalized=True peps = FPEPS.rand(4, 3, 2, seed=42) - bond = BondInfo({SZ(0):2, SZ(1): 2}) - Hij = SparseFermionTensor.random((bond, bond, bond, bond)).to_flat() + bond = BondInfo({QPN(0):1, QPN(2): 1, QPN(1,-1):1, QPN(1,1):1}) + Hij = SparseFermionTensor.random((bond, bond, bond, bond), pattern="++--").to_flat() hterms = {coos: Hij for coos in peps.gen_horizontal_bond_coos()} vterms = {coos: Hij for coos in peps.gen_vertical_bond_coos()} diff --git a/tests/test_fermion/test_numerics.py b/tests/test_fermion/test_numerics.py index c61e5c49..ef82f1ef 100644 --- a/tests/test_fermion/test_numerics.py +++ b/tests/test_fermion/test_numerics.py @@ -4,26 +4,26 @@ from quimb.tensor.fermion import ( FermionTensor, FermionTensorNetwork, tensor_contract) -from quimb.tensor.fermion_2d import FPEPS +from quimb.tensor.fermion_2d import FPEPS, gen_mf_peps -from pyblock3.algebra.symmetry import (SZ, BondInfo) +from pyblock3.algebra.symmetry import (QPN, BondInfo) from pyblock3.algebra.fermion import SparseFermionTensor np.random.seed(3) -bond_1 = BondInfo({SZ(0):3, SZ(1): 2}) -bond_2 = BondInfo({SZ(0):5, SZ(1): 5}) +bond_1 = BondInfo({QPN(0):3, QPN(1,1): 3, QPN(1,-1):3, QPN(2):3}) +bond_2 = BondInfo({QPN(0):5, QPN(1,1): 5, QPN(1,-1):5, QPN(2):5}) abc = SparseFermionTensor.random( - (bond_2, bond_1, bond_1), dq=SZ(1)).to_flat() + (bond_2, bond_1, bond_1), dq=QPN(1,1), pattern="+--").to_flat() bcd = SparseFermionTensor.random( - (bond_1, bond_1, bond_1), dq=SZ(1)).to_flat() + (bond_1, bond_1, bond_1), dq=QPN(1,-1), pattern="++-").to_flat() ega = SparseFermionTensor.random( - (bond_1, bond_1, bond_2), dq=SZ(1)).to_flat() + (bond_1, bond_1, bond_2), dq=QPN(1,1), pattern="-++").to_flat() deg = SparseFermionTensor.random( - (bond_1, bond_1, bond_1), dq=SZ(1)).to_flat() + (bond_1, bond_1, bond_1), dq=QPN(1,-1), pattern="-+-").to_flat() tsr_abc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) tsr_ega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) @@ -38,7 +38,6 @@ class TestContract: def test_backend(self): 
tsr_egbc = tensor_contract(tsr_abc, tsr_ega, output_inds=("e","g","b", "c")) - egbc = np.tensordot(ega, abc, axes=[(2,),(0,)]) err = (egbc - tsr_egbc.data).norm() assert err < 1e-10 @@ -66,31 +65,27 @@ def test_contract_ind(self): tn1 = tn.copy() tn1.contract_ind("d") out = tn1["deg"].transpose("e","g","b","c") - - egbc = np.tensordot(deg, bcd, axes=[(0,),(2)]) + egbc = np.tensordot(deg, bcd, axes=[(0,),(2,)]) err = (egbc - out.data).norm() assert err < 1e-10 + class TestBalance: def test_balance_bonds(self): - Lx = Ly = 3 - psi = FPEPS.rand(Lx, Ly, 2, seed=11) + Lx = Ly = 4 + psi = FPEPS.rand(Lx, Ly, 2) norm = psi.make_norm() exact = norm.contract(all, optimize="auto-hq") - psi1 = psi.balance_bonds() norm = psi1.make_norm() exact_bb = norm.contract(all, optimize="auto-hq") assert exact_bb == pytest.approx(exact, rel=1e-2) - for ix, iy in itertools.product(range(Lx), range(Ly)): - assert psi[ix,iy].norm() != pytest.approx(psi1[ix,iy], rel=1e-2) def test_equlaize_norm(self): Lx = Ly = 3 - psi = FPEPS.rand(Lx, Ly, 2, seed=24) + psi = FPEPS.rand(Lx, Ly, 2) norm = psi.make_norm() exact = norm.contract(all, optimize="auto-hq") - psi1 = psi.equalize_norms() norm = psi1.make_norm() exact_en = norm.contract(all, optimize="auto-hq") diff --git a/tests/test_fermion/test_operators.py b/tests/test_fermion/test_operators.py index 5de3bc87..d75595c3 100644 --- a/tests/test_fermion/test_operators.py +++ b/tests/test_fermion/test_operators.py @@ -3,7 +3,7 @@ import itertools from quimb.tensor.fermion_2d import gen_mf_peps, FPEPS from pyblock3.algebra import fermion_operators as ops -from pyblock3.algebra.symmetry import SZ +from pyblock3.algebra.symmetry import QPN from pyblock3.algebra.core import SubTensor from pyblock3.algebra.fermion import SparseFermionTensor @@ -12,23 +12,42 @@ state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) psi = gen_mf_peps(state_array) - - class TestOperators: def test_hopping(self): t = 2 hop = ops.hopping(t) blocks=[] - states = np.zeros([2,2]) - states[0] = .5 - blocks.append(SubTensor(reduced=states, q_labels=(SZ(0), SZ(1)))) #0+, 0- - blocks.append(SubTensor(reduced=-states.T, q_labels=(SZ(1), SZ(0)))) #+0, -0, eigenstate of hopping - # psi = |0+> + |0-> - |+0> - |-0>, eigenstate of hopping(eigval = t) - ket = SparseFermionTensor(blocks=blocks).to_flat() + states = np.ones([1,1]) * .5 ** .5 + blocks.append(SubTensor(reduced=states, q_labels=(QPN(0),QPN(1,1)))) #0+ + blocks.append(SubTensor(reduced=states, q_labels=(QPN(1,1),QPN(0)))) #+0, eigenstate of hopping + # psi = |0+> + |+0> - |-0>, eigenstate of hopping(eigval = -t) + ket = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) + bra = ket.dagger + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(-t, rel=1e-2) + + def test_exponential_hop(self): + t = 3 + tau = 0.1 + hop = ops.hopping(t) + hop_exp = hop.to_exponential(-tau) + blocks=[] + states = np.ones([1,1]) * .5 + blocks.append(SubTensor(reduced=states, q_labels=(QPN(2), QPN(0)))) + blocks.append(SubTensor(reduced=states, q_labels=(QPN(0), QPN(2)))) + blocks.append(SubTensor(reduced=-states, q_labels=(QPN(1,1), QPN(1,-1)))) + blocks.append(SubTensor(reduced=states, q_labels=(QPN(1,-1), QPN(1,1)))) + + ket = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) - bra = ket.permute([1,0]).conj() + bra = ket.dagger expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - assert expec == 
pytest.approx(t, rel=1e-2) + assert expec == pytest.approx(2*t, rel=1e-2) + + ket1 = np.tensordot(hop_exp, ket, axes=((2,3),(0,1))) + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(np.e**(-2*t*tau), rel=1e-2) def test_onsite_u(self): U = 4. @@ -103,26 +122,3 @@ def test_exponential_u(self): for ix, iy in itertools.product(range(Lx), range(Ly)): ref = np.e**(-tau*U) if state_array[ix,iy]==3 else 1. assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) - - def test_exponential_hop(self): - t = 3 - tau = 0.1 - hop = ops.hopping(t) - hop_exp = hop.to_exponential(-tau) - blocks=[] - states = np.zeros([2,2]) - states[1,0] = states[0,1] = .5 - blocks.append(SubTensor(reduced=states, q_labels=(SZ(0), SZ(0)))) - states = np.zeros([2,2]) - states[1,0] =-.5 - states[0,1] =.5 - blocks.append(SubTensor(reduced=states, q_labels=(SZ(1), SZ(1)))) - ket = SparseFermionTensor(blocks=blocks).to_flat() - ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) - bra = ket.permute([1,0]).conj() - expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - assert expec == pytest.approx(2*t, rel=1e-2) - - ket1 = np.tensordot(hop_exp, ket, axes=((2,3),(0,1))) - expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - assert expec == pytest.approx(np.e**(-2*t*tau), rel=1e-2) From bb328397438d7ff39cdefe14589b083ca6267f56 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 3 Mar 2021 14:58:58 -0800 Subject: [PATCH 37/61] bugfix --- quimb/tensor/fermion_2d.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 6cac73cb..2620ff59 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -249,9 +249,8 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, fs.replace_tensor(work_site, inner_ts[i], tid=tid, virtual=True) else: fs.insert_tensor(work_site+i, inner_ts[i], tid=tid, virtual=True) - new_ts = [ - tensor_contract(ts, tr, output_inds=to.inds, inplace=True) + tensor_contract(ts, tr, inplace=True).transpose_like_(to) for to, ts, tr in zip(original_ts, outer_ts, inner_ts) ] @@ -778,7 +777,7 @@ def compute_local_expectation( terms, normalized=False, autogroup=True, - contract_optimize='auto-hq', + contract_optimize='greedy', return_all=False, layer_tags=('KET', 'BRA'), plaquette_envs=None, From 8201017c5854bff92721e0d460d93fb8a8200db0 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 2 Apr 2021 16:46:51 -0700 Subject: [PATCH 38/61] parse compress opts to canonize_row/column when contracting boundary --- quimb/tensor/tensor_2d.py | 48 +++++++++++++++++++++++++++++++-------- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/quimb/tensor/tensor_2d.py b/quimb/tensor/tensor_2d.py index 05165b7a..1ec6ee2c 100644 --- a/quimb/tensor/tensor_2d.py +++ b/quimb/tensor/tensor_2d.py @@ -708,7 +708,7 @@ def _contract_boundary_from_bottom_single( # │ │ │ │ │ # ●══●══<══<══< # - self.canonize_row(i, sweep=canonize_sweep, yrange=yrange) + self.canonize_row(i, sweep=canonize_sweep, yrange=yrange, max_bond=max_bond, cutoff=cutoff) # # │ │ │ │ │ --> │ │ │ │ │ --> │ │ │ │ │ @@ -870,7 +870,7 @@ def _contract_boundary_from_top_single( # ●══●══<══<══< # | | | | | # - self.canonize_row(i, sweep=canonize_sweep, yrange=yrange) + self.canonize_row(i, sweep=canonize_sweep, yrange=yrange, max_bond=max_bond, cutoff=cutoff) # # >──●══●══●══● --> >──>──●══●══● --> >──>──>──●══● # | | | | | --> | | | | | --> | | | | | @@ -1035,7 +1035,7 @@ def 
_contract_boundary_from_left_single( # ║ ║ # ●── ●── # - self.canonize_column(j, sweep=canonize_sweep, xrange=xrange) + self.canonize_column(j, sweep=canonize_sweep, xrange=xrange, max_bond=max_bond, cutoff=cutoff) # # v── ●── # ║ │ @@ -1208,7 +1208,7 @@ def _contract_boundary_from_right_single( # ║ ║ # ──● ──● # - self.canonize_column(j, sweep=canonize_sweep, xrange=xrange) + self.canonize_column(j, sweep=canonize_sweep, xrange=xrange, max_bond=max_bond, cutoff=cutoff) # # ──v ──● # ║ │ @@ -1597,12 +1597,21 @@ def compute_row_environments( row_envs = dict() - # upwards pass row_envs['below', 0] = TensorNetwork([]) + row_envs['above', self.Lx - 1] = TensorNetwork([]) + + if self.Lx == 1: + return row_envs + # upwards pass first_row = self.row_tag(0) env_bottom = self.copy() if dense: env_bottom ^= first_row + else: + for j in range(self.Ly): + env_bottom ^= self.site_tag(0, j) + env_bottom.compress_row(0, sweep="right", compress_opts=compress_opts) + row_envs['below', 1] = env_bottom.select(first_row) for i in range(2, env_bottom.Lx): if dense: @@ -1613,11 +1622,15 @@ def compute_row_environments( row_envs['below', i] = env_bottom.select(first_row) # downwards pass - row_envs['above', self.Lx - 1] = TensorNetwork([]) last_row = self.row_tag(self.Lx - 1) env_top = self.copy() if dense: env_top ^= last_row + else: + for j in range(self.Ly): + env_top ^= self.site_tag(self.Lx-1, j) + env_top.compress_row(self.Lx-1, sweep="right", compress_opts=compress_opts) + row_envs['above', self.Lx - 2] = env_top.select(last_row) for i in range(env_top.Lx - 3, -1, -1): if dense: @@ -1726,10 +1739,18 @@ def compute_col_environments( # rightwards pass col_envs['left', 0] = TensorNetwork([]) + col_envs['right', self.Ly - 1] = TensorNetwork([]) + if self.Ly == 1: + return col_envs first_column = self.col_tag(0) env_right = self.copy() if dense: env_right ^= first_column + else: + for i in range(self.Lx): + env_right ^= self.site_tag(i, 0) + env_right.compress_column(0, sweep="up", compress_opts=compress_opts) + col_envs['left', 1] = env_right.select(first_column) for j in range(2, env_right.Ly): if dense: @@ -1742,11 +1763,20 @@ def compute_col_environments( # leftwards pass last_column = self.col_tag(self.Ly - 1) env_left = self.copy() - col_envs['right', self.Ly - 1] = TensorNetwork([]) + if dense: + env_left ^= last_column + else: + for i in range(self.Lx): + env_left ^= self.site_tag(i, self.Ly-1) + env_left.compress_column(self.Ly-1, sweep="up", compress_opts=compress_opts) + col_envs['right', self.Ly - 2] = env_left.select(last_column) for j in range(self.Ly - 3, -1, -1): - env_left.contract_boundary_from_right_( - (j + 1, j + 2), **contract_boundary_opts) + if dense: + env_left ^= (self.col_tag(j + 1), self.col_tag(j + 2)) + else: + env_left.contract_boundary_from_right_( + (j + 1, j + 2), **contract_boundary_opts) col_envs['right', j] = env_left.select(last_column) return col_envs From 5754ccc345ab94a4bd4a06bdd2a9e2d915827a95 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 2 Apr 2021 16:49:28 -0700 Subject: [PATCH 39/61] restructure to stay consistent with new pyblock3 API; add compression step in bilayer boundary contraction; tests modified --- quimb/tensor/fermion.py | 23 +- quimb/tensor/fermion_2d.py | 145 +++++++----- quimb/tensor/fermion_2d_tebd.py | 11 +- quimb/tensor/fermion_gen.py | 52 +++++ quimb/tensor/fermion_interface.py | 23 ++ quimb/tensor/test_fermion/test_fermion_2d.py | 230 +++++++++++++++++++ quimb/tensor/test_fermion/test_numerics.py | 165 +++++++++++++ 
quimb/tensor/test_fermion/test_operators.py | 225 ++++++++++++++++++ 8 files changed, 802 insertions(+), 72 deletions(-) create mode 100644 quimb/tensor/fermion_gen.py create mode 100644 quimb/tensor/fermion_interface.py create mode 100644 quimb/tensor/test_fermion/test_fermion_2d.py create mode 100644 quimb/tensor/test_fermion/test_numerics.py create mode 100644 quimb/tensor/test_fermion/test_operators.py diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 2f1939a4..e1308bdf 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -13,7 +13,10 @@ from .tensor_core import tensor_contract as _tensor_contract from ..utils import oset, valmap from .array_ops import asarray, ndim -from pyblock3.algebra.symmetry import QPN +from . import fermion_interface + +DEFAULT_SYMMETRY = fermion_interface.DEFAULT_SYMMETRY +BondInfo = fermion_interface.BondInfo def _contract_connected(T1, T2, output_inds=None): """Fermionic contraction of two tensors that are adjacent to each other. @@ -941,6 +944,10 @@ def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): raise ValueError(f"The 'left' indices {self.left_inds} are not " f"found in {self.inds}.") + @property + def symmetry(self): + return self.data.symmetry + @property def fermion_owner(self): return self._fermion_owner @@ -960,8 +967,7 @@ def ind_size(self, dim_or_ind): raise ValueError("%s indice not found in the tensor"%dim_or_ind) dim_or_ind = self.inds.index(dim_or_ind) - from pyblock3.algebra.symmetry import QPN, BondInfo - sz = [QPN.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] + sz = [self.symmetry.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] sp = self.data.shapes[:,dim_or_ind] bond_dict = dict(zip(sz, sp)) return BondInfo(bond_dict) @@ -1150,6 +1156,8 @@ def is_mergeable(*ts_or_tsn): if obj.fermion_owner is None: return False hashval, fsobj, tid = obj.fermion_owner + if fsobj() is None: + return False fs_lst.append(hashval) site_lst.append(fsobj()[tid][1]) elif isinstance(obj, FermionTensorNetwork): @@ -1464,11 +1472,10 @@ def H(self): max_site = max(fs.sites) for tid, (tsr, site) in fs.tensor_order.items(): - reverse_order = list(range(tsr.ndim))[::-1] - new_data = tsr.data.dagger - new_inds = tsr.inds[::-1] - tsr.modify(data=new_data, inds=new_inds) - fs.tensor_order.update({tid: (tsr, max_site-site)}) + new_data = tsr.data.dagger + new_inds = tsr.inds[::-1] + tsr.modify(data=new_data, inds=new_inds) + fs.tensor_order.update({tid: (tsr, max_site-site)}) return tn def __mul__(self, other): diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 2620ff59..bb101e94 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -296,9 +296,26 @@ def __or__(self, other): def flatten(self, fuse_multibonds=True, inplace=False): raise NotImplementedError - def compute_row_environments(self, dense=False, **compress_opts): - layer_tags = compress_opts.get("layer_tags", None) - reorder_tags = compress_opts.pop("reorder_tags", layer_tags) + def compute_row_environments( + self, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, + dense=False, + compress_opts=None, + **contract_boundary_opts + ): + contract_boundary_opts['max_bond'] = max_bond + contract_boundary_opts['cutoff'] = cutoff + contract_boundary_opts['canonize'] = canonize + contract_boundary_opts['layer_tags'] = layer_tags + contract_boundary_opts['compress_opts'] = compress_opts + + if compress_opts is not None: + reorder_tags = compress_opts.pop("reorder_tags", layer_tags) + else: 
+ reorder_tags = layer_tags env_bottom = self.reorder_right_row(layer_tags=reorder_tags) env_top = env_bottom.copy() @@ -308,8 +325,16 @@ def compute_row_environments(self, dense=False, **compress_opts): row_envs['below', 0] = FermionTensorNetwork([]) first_row = self.row_tag(0) row_envs['mid', 0] = env_bottom.select(first_row).copy() + row_envs['above', self.Lx - 1] = FermionTensorNetwork([]) + if self.Lx == 1: + return row_envs if dense: env_bottom ^= first_row + else: + for j in range(self.Ly): + env_bottom ^= self.site_tag(0, j) + env_bottom.compress_row(0, sweep="right", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) + row_envs['below', 1] = env_bottom.select(first_row).copy() for i in range(2, env_bottom.Lx): below_row = env_bottom.row_tag(i-1) @@ -318,30 +343,51 @@ def compute_row_environments(self, dense=False, **compress_opts): env_bottom ^= (self.row_tag(i - 2), self.row_tag(i - 1)) else: env_bottom.contract_boundary_from_bottom_( - (i - 2, i - 1), **compress_opts) + (i - 2, i - 1), **contract_boundary_opts) row_envs['below', i] = env_bottom.select(first_row).copy() last_row = env_bottom.row_tag(self.Lx-1) row_envs['mid', self.Lx-1] = env_bottom.select(last_row).copy() # downwards pass - row_envs['above', self.Lx - 1] = FermionTensorNetwork([]) last_row = self.row_tag(self.Lx - 1) if dense: env_top ^= last_row + else: + for j in range(self.Ly): + env_top ^= self.site_tag(self.Lx-1, j) + env_top.compress_row(self.Lx-1, sweep="right", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) + row_envs['above', self.Lx - 2] = env_top.select(last_row).copy() for i in range(env_top.Lx - 3, -1, -1): if dense: env_top ^= (self.row_tag(i + 1), self.row_tag(i + 2)) else: env_top.contract_boundary_from_top_( - (i + 1, i + 2), **compress_opts) + (i + 1, i + 2), **contract_boundary_opts) row_envs['above', i] = env_top.select(last_row).copy() return row_envs - def compute_col_environments(self, dense=False, **compress_opts): - layer_tags = compress_opts.get("layer_tags", None) - reorder_tags = compress_opts.pop("reorder_tags", layer_tags) + def compute_col_environments( + self, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, + dense=False, + compress_opts=None, + **contract_boundary_opts + ): + contract_boundary_opts['max_bond'] = max_bond + contract_boundary_opts['cutoff'] = cutoff + contract_boundary_opts['canonize'] = canonize + contract_boundary_opts['layer_tags'] = layer_tags + contract_boundary_opts['compress_opts'] = compress_opts + + if compress_opts is not None: + reorder_tags = compress_opts.pop("reorder_tags", layer_tags) + else: + reorder_tags = layer_tags env_left = self.reorder_upward_column(layer_tags=reorder_tags) env_right = env_left.copy() col_envs = dict() @@ -350,8 +396,16 @@ def compute_col_environments(self, dense=False, **compress_opts): col_envs['left', 0] = FermionTensorNetwork([]) first_col = self.col_tag(0) col_envs['mid', 0] = env_left.select(first_col).copy() + col_envs['right', self.Ly - 1] = FermionTensorNetwork([]) + if self.Ly == 1: + return col_envs + if dense: env_left ^= first_col + else: + for i in range(self.Lx): + env_left ^= self.site_tag(i, 0) + env_left.compress_column(0, sweep="up", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) col_envs['left', 1] = env_left.select(first_col).copy() for i in range(2, env_left.Ly): @@ -361,23 +415,26 @@ def compute_col_environments(self, dense=False, **compress_opts): env_left ^= (self.col_tag(i - 2), self.col_tag(i - 1)) else: 
env_left.contract_boundary_from_left_( - (i - 2, i - 1), **compress_opts) + (i - 2, i - 1), **contract_boundary_opts) col_envs['left', i] = env_left.select(first_col).copy() last_col = env_left.col_tag(self.Ly-1) col_envs['mid', self.Ly-1] = env_left.select(last_col).copy() # downwards pass - col_envs['right', self.Ly - 1] = FermionTensorNetwork([]) last_col = self.col_tag(self.Ly - 1) if dense: env_right ^= last_col + else: + for i in range(self.Lx): + env_right ^= self.site_tag(i, self.Ly-1) + env_right.compress_column(self.Ly-1, sweep="up", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) col_envs['right', self.Ly - 2] = env_right.select(last_col).copy() for i in range(env_right.Ly - 3, -1, -1): if dense: env_right ^= (self.col_tag(i + 1), self.col_tag(i + 2)) else: env_right.contract_boundary_from_right_( - (i + 1, i + 2), **compress_opts) + (i + 1, i + 2), **contract_boundary_opts) col_envs['right', i] = env_right.select(last_col).copy() return col_envs @@ -386,6 +443,10 @@ def _compute_plaquette_environments_row_first( self, x_bsz, y_bsz, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, second_dense=None, row_envs=None, **compute_environment_opts @@ -396,7 +457,8 @@ def _compute_plaquette_environments_row_first( # first we contract from either side to produce column environments if row_envs is None: row_envs = self.compute_row_environments( - **compute_environment_opts) + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) # next we form vertical strips and contract from both top and bottom # for each column @@ -430,7 +492,8 @@ def _compute_plaquette_environments_row_first( # col_envs[i] = row_i.compute_col_environments( xrange=(max(i - 1, 0), min(i + x_bsz, self.Lx - 1)), - dense=second_dense, **compute_environment_opts) + dense=second_dense, max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) plaquette_envs = dict() for i0, j0 in product(range(self.Lx - x_bsz + 1), @@ -469,6 +532,10 @@ def _compute_plaquette_environments_col_first( self, x_bsz, y_bsz, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, second_dense=None, col_envs=None, **compute_environment_opts @@ -479,7 +546,8 @@ def _compute_plaquette_environments_col_first( # first we contract from either side to produce column environments if col_envs is None: col_envs = self.compute_col_environments( - **compute_environment_opts) + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) # next we form vertical strips and contract from both top and bottom # for each column @@ -518,7 +586,8 @@ def _compute_plaquette_environments_col_first( # row_envs[j] = col_j.compute_row_environments( yrange=(max(j - 1, 0), min(j + y_bsz, self.Ly - 1)), - dense=second_dense, **compute_environment_opts) + dense=second_dense, max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) # then range through all the possible plaquettes, selecting the correct # boundary tensors from either the column or row environments @@ -981,6 +1050,7 @@ def rand(cls, Lx, Ly, bond_dim, qpn=None, phys_dim=1, ------- psi : PEPS """ + raise NotImplementedError if seed is not None: np.random.seed(seed) if qpn is None: qpn = (Lx*Ly, Lx*Ly%2) @@ -995,46 +1065,3 @@ def rand(cls, Lx, Ly, bond_dim, qpn=None, phys_dim=1, arrays = fermion_gen._qpn_map_to_row_skeleton(qpn_map, phys_dim, bond_dim, shape) return cls(arrays, 
**peps_opts) - -def _gen_site_wfn_tsr(state, pattern=None, ndim=2, ax=0): - from pyblock3.algebra.core import SubTensor - from pyblock3.algebra.fermion import SparseFermionTensor - from pyblock3.algebra.symmetry import QPN - state_map = {0:QPN(0,0), 1:QPN(1,1), 2:QPN(1,-1), 3:QPN(2,0)} - if state not in state_map: - raise KeyError("requested state not recoginized") - q_label = [QPN(0),] * ax + [state_map[state]] + [QPN(0),] *(ndim-ax-1) - shape = [1,] * ndim - dat = np.ones(shape) - blocks = [SubTensor(reduced=dat, q_labels=q_label)] - smat = SparseFermionTensor(blocks=blocks, pattern=pattern).to_flat() - return smat - -def gen_mf_peps(state_array, shape='urdlp', **kwargs): - pattern_map = {"d": "+", "l":"+", "p":"+", - "u": "-", "r":"-"} - Lx, Ly = state_array.shape - arr = state_array.astype("int") - cache = dict() - def _gen_ij(i, j): - state = arr[i, j] - array_order = shape - if i == Lx - 1: - array_order = array_order.replace('u', '') - if j == Ly - 1: - array_order = array_order.replace('r', '') - if i == 0: - array_order = array_order.replace('d', '') - if j == 0: - array_order = array_order.replace('l', '') - pattern = "".join([pattern_map[i] for i in array_order]) - ndim = len(array_order) - ax = array_order.index('p') - key = (state, ndim, ax, pattern) - if key not in cache: - cache[key] = _gen_site_wfn_tsr(state, pattern, ndim, ax).copy() - return cache[key] - - tsr_array = [[_gen_ij(i,j) for j in range(Ly)] for i in range(Lx)] - - return FPEPS(tsr_array, shape=shape, **kwargs) diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py index 35b64861..4494a29b 100644 --- a/quimb/tensor/fermion_2d_tebd.py +++ b/quimb/tensor/fermion_2d_tebd.py @@ -6,11 +6,11 @@ from .tensor_2d_tebd import SimpleUpdate as _SimpleUpdate from .tensor_2d_tebd import conditioner from .tensor_2d import gen_long_range_path, nearest_neighbors -from pyblock3.algebra.fermion_operators import eye, hubbard +from .fermion_interface import DEFAULT_SYMMETRY, eye, to_exponential INVERSE_CUTOFF = 1e-10 -def Hubbard2D(t, u, Lx, Ly, mu=0.): +def Hubbard2D(t, u, Lx, Ly, mu=0., symmetry=DEFAULT_SYMMETRY): """Create a LocalHam2D object for 2D Hubbard Model Parameters @@ -30,6 +30,7 @@ def Hubbard2D(t, u, Lx, Ly, mu=0.): ------- a LocalHam2D object """ + from quimb.tensor.fermion_interface import Hubbard ham = dict() count_neighbour = lambda i,j: (i>0) + (i0) + (j1e-10 + + def test_equlaize_norm(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.equalize_norms() + exact_en = norm1.contract(all, optimize="auto-hq") + assert exact_en == pytest.approx(exact, rel=1e-2) + ref1 = list(norm1.tensor_map.values())[0].norm() + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert tsr1.norm() == pytest.approx(ref1, rel=1e-2) + +@pytest.mark.usefixtures('u1setup') +class TestU1(TestU11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestZ4(TestU11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestZ2(TestU11): + pass diff --git a/quimb/tensor/test_fermion/test_operators.py b/quimb/tensor/test_fermion/test_operators.py new file mode 100644 index 00000000..d2d99835 --- /dev/null +++ b/quimb/tensor/test_fermion/test_operators.py @@ -0,0 +1,225 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.fermion_2d import FPEPS +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra import fermion_ops +from quimb.tensor.fermion_interface import U11, U1, Z4, Z2, SparseFermionTensor +from 
quimb.tensor.fermion_gen import gen_mf_peps + +@pytest.fixture(scope='class') +def u11setup(request): + request.cls.t = 2 + request.cls.U = 4 + request.cls.tau = 0.1 + request.cls.mu = 0.2 + request.cls.symmetry = U11 + states = np.ones([1,1]) * .5 ** .5 + blocks = [SubTensor(reduced=states, q_labels=(U11(0),U11(1,1))), #0+ + SubTensor(reduced=states, q_labels=(U11(1,1),U11(0)))] #+0, eigenstate of hopping + request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + blocks=[] + states = np.ones([1,1]) * .5 + blocks = [SubTensor(reduced=states, q_labels=(U11(2), U11(0))), + SubTensor(reduced=states, q_labels=(U11(0), U11(2))), + SubTensor(reduced=-states, q_labels=(U11(1,1), U11(1,-1))), + SubTensor(reduced=states, q_labels=(U11(1,-1), U11(1,1)))] + request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + Lx = Ly = 4 + request.cls.Lx = Lx + request.cls.Ly = Ly + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry='u11') + request.cls.fac = (0.5, 0.3) + +@pytest.fixture(scope='class') +def u1setup(request): + request.cls.t = 2 + request.cls.U = 4 + request.cls.tau = 0.1 + request.cls.mu = 0.2 + request.cls.symmetry = U1 + states = np.zeros([1,2]) + states[0,0] = .5 ** .5 + blocks = [SubTensor(reduced=states, q_labels=(U1(0),U1(1))), #0+ + SubTensor(reduced=states, q_labels=(U1(1),U1(0)))] #+0, eigenstate of hopping + request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + blocks=[] + states = np.zeros([2,2]) + states[0,1] = -.5 + states[1,0] = .5 + blocks = [SubTensor(reduced=np.ones([1,1]) * .5, q_labels=(U1(2), U1(0))), + SubTensor(reduced=np.ones([1,1]) * .5, q_labels=(U1(0), U1(2))), + SubTensor(reduced=states, q_labels=(U1(1), U1(1)))] + request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + Lx = Ly = 4 + request.cls.Lx = Lx + request.cls.Ly = Ly + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry='u1') + request.cls.fac = (0.5, 0.3) + +@pytest.fixture(scope='class') +def z4setup(request): + request.cls.t = 2 + request.cls.U = 4 + request.cls.tau = 0.1 + request.cls.mu = 0.2 + request.cls.symmetry = Z4 + states = np.zeros([2,2]) + states[0,0] = .5 ** .5 + blocks = [SubTensor(reduced=states, q_labels=(Z4(0),Z4(1))), #0+ + SubTensor(reduced=states, q_labels=(Z4(1),Z4(0)))] #+0, eigenstate of hopping + request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + blocks=[] + states = np.zeros([2,2]) + states[1,0] = .5 + blocks = [SubTensor(reduced=states, q_labels=(Z4(0), Z4(0))), + SubTensor(reduced=states.T, q_labels=(Z4(0), Z4(0))), + SubTensor(reduced=-states.T, q_labels=(Z4(1), Z4(1))), + SubTensor(reduced=states, q_labels=(Z4(1), Z4(1)))] + request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + Lx = Ly = 4 + request.cls.Lx = Lx + request.cls.Ly = Ly + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry='z4') + request.cls.fac = (0.5, 0.3) + +@pytest.fixture(scope='class') +def z2setup(request): + request.cls.t = 2 + request.cls.U = 4 + request.cls.tau = 0.1 + request.cls.mu = 0.2 + request.cls.symmetry = Z2 + states = np.zeros([2,2]) + states[0,0] = .5 ** .5 + blocks = [SubTensor(reduced=states, q_labels=(Z2(0),Z2(1))), #0+ + 
SubTensor(reduced=states, q_labels=(Z2(1),Z2(0)))] #+0, eigenstate of hopping + request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + blocks=[] + states = np.zeros([2,2]) + states[1,0] = .5 + blocks = [SubTensor(reduced=states, q_labels=(Z2(0), Z2(0))), + SubTensor(reduced=states.T, q_labels=(Z2(0), Z2(0))), + SubTensor(reduced=-states.T, q_labels=(Z2(1), Z2(1))), + SubTensor(reduced=states, q_labels=(Z2(1), Z2(1)))] + + request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() + + Lx = Ly = 4 + request.cls.Lx = Lx + request.cls.Ly = Ly + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry='z2') + request.cls.fac = (0.5, 0.3) + +@pytest.mark.usefixtures('u11setup') +class TestU11: + def test_hopping(self): + t = self.t + hop = fermion_ops.H1(-t, symmetry=self.symmetry) + ket = self.hop_psi + ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) + bra = ket.dagger + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(-t, rel=1e-2) + + def test_hopping_exponential(self): + t = self.t + tau = self.tau + hop = fermion_ops.H1(-t, symmetry=self.symmetry) + #hop_exp = hop.to_exponential(-tau) + hop_exp = fermion_ops.get_flat_exponential(hop, -tau) + ket = self.hop_exp_psi + bra = ket.dagger + ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(2*t, rel=1e-2) + + ket1 = np.tensordot(hop_exp, ket, axes=((2,3),(0,1))) + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(np.e**(-2*t*tau), rel=1e-2) + + def test_onsite_u(self): + U = self.U + uop = fermion_ops.onsite_U(U, symmetry=self.symmetry) + terms = {coo: uop for coo in itertools.product(range(self.Lx), range(self.Ly))} + psi = self.peps + state_array = self.state_array + result = psi.compute_local_expectation(terms, normalized=False, return_all=True) + for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): + ref = U if state_array[ix,iy]==3 else 0. 
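+            # state 3 in the mean-field state array is the doubly occupied site (particle
+            # number 2 and Sz 0 in the reference tables of test_n/test_sz below), so only
+            # those sites pick up the onsite energy U; empty or singly occupied sites give 0.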
+ assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_sz(self): + sz = fermion_ops.measure_SZ(symmetry=self.symmetry) + terms = {coo: sz for coo in itertools.product(range(self.Lx), range(self.Ly))} + result = self.peps.compute_local_expectation(terms, normalized=False, return_all=True) + ref_dic = {0:0., 1:0.5, 2:-.5, 3:0.} + for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): + state = self.state_array[ix,iy] + ref = ref_dic[state] + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_n(self): + nop = fermion_ops.ParticleNumber(symmetry=self.symmetry) + terms = {coo: nop for coo in itertools.product(range(self.Lx), range(self.Ly))} + result = self.peps.compute_local_expectation(terms, normalized=False, return_all=True) + ref_dic = {0:0., 1:1, 2:1, 3:2} + for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): + state = self.state_array[ix,iy] + ref = ref_dic[state] + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_exponential_u(self): + U = self.U + tau = self.tau + uop = fermion_ops.onsite_U(U, symmetry=self.symmetry) + uop_exp = fermion_ops.get_flat_exponential(uop, -tau) + terms = {coo: uop_exp for coo in itertools.product(range(self.Lx), range(self.Ly))} + result = self.peps.compute_local_expectation(terms, normalized=False, return_all=True) + for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): + ref = np.e**(-tau*U) if self.state_array[ix,iy]==3 else 1. + assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) + + def test_hubbard(self): + mu = self.mu + hop = fermion_ops.H1(-self.t, symmetry=self.symmetry) + uop = fermion_ops.onsite_U(self.U, symmetry=self.symmetry) + nop = fermion_ops.ParticleNumber(symmetry=self.symmetry) + faca, facb = self.fac + hub = fermion_ops.Hubbard(self.t, self.U, mu=mu, fac=self.fac, symmetry=self.symmetry) + ket = self.hop_exp_psi + bra = ket.dagger + + ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) + ket1 = ket1 + faca*np.tensordot(uop, ket, axes=((-1,),(0,))) + ket1 = ket1 + facb*np.tensordot(uop, ket, axes=((-1,),(1,))).transpose([1,0]) + ket1 = ket1 + faca*mu*np.tensordot(nop, ket, axes=((-1,),(0,))) + ket1 = ket1 + facb*mu*np.tensordot(nop, ket, axes=((-1,),(1,))).transpose([1,0]) + expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + + ket1 = np.tensordot(hub, ket, axes=((2,3),(0,1))) + expec1 = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] + assert expec == pytest.approx(expec1, rel=1e-2) + +@pytest.mark.usefixtures('u1setup') +class TestU1(TestU11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestZ4(TestU11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestZ2(TestU11): + pass From 8659d5deb6bfddb5847433c9cde9466abe8f87e8 Mon Sep 17 00:00:00 2001 From: yangcal Date: Sun, 11 Apr 2021 14:41:15 -0700 Subject: [PATCH 40/61] bug fix for gauge ordering in SimpleUpdate --- quimb/tensor/fermion.py | 17 ++++++++++++----- quimb/tensor/fermion_2d.py | 4 ++-- quimb/tensor/fermion_2d_tebd.py | 29 +++++++++++++++++++---------- 3 files changed, 33 insertions(+), 17 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index e1308bdf..a0385632 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -349,7 +349,7 @@ def tensor_split( if get == 'tensors': return tensors - return FermionTensorNetwork(tensors, check_collisions=False, virtual=True) + return FermionTensorNetwork(tensors[::-1], check_collisions=False, virtual=True) def _compress_connected(Tl, Tr, absorb='both', **compress_opts): 
"""Compression of two Fermionic tensors that are adjacent to each other. @@ -524,8 +524,12 @@ def tensor_balance_bond(t1, t2, smudge=1e-6): sblk1.append(SubTensor(reduced=np.diag(s**-0.25), q_labels=iblk1.q_labels)) sblk2.append(SubTensor(reduced=np.diag(s**0.25), q_labels=iblk2.q_labels)) - s1 = SparseFermionTensor(blocks=sblk1, pattern="+-").to_flat() - s2 = SparseFermionTensor(blocks=sblk2, pattern="+-").to_flat() + sign1 = t1.data.pattern[t1.inds.index(ix)] + sign2 = t2.data.pattern[t2.inds.index(ix)] + s1_pattern = {"+":"-+", "-":"+-"}[sign1] + s2_pattern = {"-":"-+", "+":"+-"}[sign2] + s1 = SparseFermionTensor(blocks=sblk1, pattern=s1_pattern).to_flat() + s2 = SparseFermionTensor(blocks=sblk2, pattern=s2_pattern).to_flat() t1.multiply_index_diagonal_(ix, s1, location="back") t2.multiply_index_diagonal_(ix, s2, location="front") @@ -966,8 +970,11 @@ def ind_size(self, dim_or_ind): if dim_or_ind not in self.inds: raise ValueError("%s indice not found in the tensor"%dim_or_ind) dim_or_ind = self.inds.index(dim_or_ind) - - sz = [self.symmetry.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] + ipattern = self.data.pattern[dim_or_ind] + if ipattern=="+": + sz = [self.symmetry.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] + else: + sz = [-self.symmetry.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] sp = self.data.shapes[:,dim_or_ind] bond_dict = dict(zip(sz, sp)) return BondInfo(bond_dict) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index bb101e94..7af77fa9 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -126,7 +126,7 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] snew = np.zeros_like(s.data) snew[idx] = 1/s.data[idx] - snew = s.__class__(s.q_labels, s.shapes, snew, pattern="+-", idxs=s.idxs) + snew = s.__class__(s.q_labels, s.shapes, snew, pattern=s.pattern, idxs=s.idxs) t = inner_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) @@ -258,7 +258,7 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] snew = np.zeros_like(s.data) snew[idx] = 1/s.data[idx] - snew = s.__class__(s.q_labels, s.shapes, snew, pattern="+-", idxs=s.idxs) + snew = s.__class__(s.q_labels, s.shapes, snew, pattern=s.pattern, idxs=s.idxs) t = new_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py index 4494a29b..358795b3 100644 --- a/quimb/tensor/fermion_2d_tebd.py +++ b/quimb/tensor/fermion_2d_tebd.py @@ -227,6 +227,12 @@ def __repr__(self): s = "" return s.format(self.Lx, self.Ly, len(self.terms)) +def _get_location(Ti, Tj): + if Ti.get_fermion_info()[1]INVERSE_CUTOFF] += self.gauge_smudge @@ -292,13 +303,11 @@ def env_neighbours(i, j): Ta, Tb = self._psi[site_a], self._psi[site_b] if (site_a, site_b) in self.gauges: Tsval = self.gauges[(site_a, site_b)] - loca, locb = ("back", "front") elif (site_b, site_a) in self.gauges: Tsval = self.gauges[(site_b, site_a)] - loca, locb = ("front", "back") else: raise KeyError("gauge not found") - + loca, locb = _get_location(Ta, Tb) mult_val = Tsval.copy() mult_val.data = Tsval.data ** .5 bnd = self._psi.bond(site_a, site_b) @@ -332,16 +341,15 @@ def env_neighbours(i, j): for neighbour in neighbours[site]: if (site, neighbour) in self.gauges: Tsval = self.gauges[(site, neighbour)] - location = "back" elif (neighbour, site) in self.gauges: Tsval = 
self.gauges[(neighbour, site)] - location = "front" else: raise KeyError("gauge not found") bnd = self._psi.bond(site, neighbour) mult_val = Tsval.copy() non_zero_ind = abs(mult_val.data)>INVERSE_CUTOFF mult_val.data[non_zero_ind] = (mult_val.data[non_zero_ind] + self.gauge_smudge) ** -1 + location = _get_location(Tij, self._psi[neighbour])[0] Tij.multiply_index_diagonal_( ind=bnd, x=mult_val, location=location) @@ -360,10 +368,11 @@ def get_state(self, absorb_gauges=True): bnd = psi.bond(ija, ijb) Ta = psi[ija] Tb = psi[ijb] + loca, locb = _get_location(Ta, Tb) mult_val = Tsval.copy() mult_val.data = Tsval.data ** .5 - Ta.multiply_index_diagonal_(bnd, mult_val, location='back') - Tb.multiply_index_diagonal_(bnd, mult_val, location='front') + Ta.multiply_index_diagonal_(bnd, mult_val, location=loca) + Tb.multiply_index_diagonal_(bnd, mult_val, location=locb) if self.condition_tensors: conditioner(psi, balance_bonds=self.condition_balance_bonds) From 3998bc3d936220e3d7eafc800b5d9115b2bc4a68 Mon Sep 17 00:00:00 2001 From: yangcal Date: Sun, 11 Apr 2021 14:41:34 -0700 Subject: [PATCH 41/61] bug fix for numerics test --- quimb/tensor/test_fermion/test_numerics.py | 48 ++++++++++++---------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/quimb/tensor/test_fermion/test_numerics.py b/quimb/tensor/test_fermion/test_numerics.py index 0c80fc2a..19321784 100644 --- a/quimb/tensor/test_fermion/test_numerics.py +++ b/quimb/tensor/test_fermion/test_numerics.py @@ -12,9 +12,10 @@ def u11setup(request): bond1 = BondInfo({U11(0):3, U11(1,1): 3, U11(1,-1):3, U11(2):3}) bond2 = BondInfo({U11(0):5, U11(1,1): 5, U11(1,-1):5, U11(2):5}) request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U11(1,1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U11(1,-1), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U11(1,1), pattern="-++").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U11(1,-1), pattern="-+-").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U11(-1,-1), pattern="++-").to_flat() + + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U11(1,-1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U11(-1,1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) @@ -25,8 +26,8 @@ def u11setup(request): bc = rand((bond1, bond1), dq=U11(1,-1), pattern="++").to_flat() Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab, inds=['a','b1'], tags=["ab1"]) - Tbc1 = FermionTensor(bc, inds=['b1','c'], tags=["bc1"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) yield @@ -34,10 +35,12 @@ def u11setup(request): def u1setup(request): bond1 = BondInfo({U1(0):3, U1(1): 3, U1(3):3, U1(2):3}) bond2 = BondInfo({U1(0):5, U1(1): 5, U1(3):5, U1(2):5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U1(-1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U1(3), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U1(-1), pattern="-++").to_flat() - request.cls.deg = deg = 
rand((bond1, bond1, bond1), dq=U1(3), pattern="-+-").to_flat() + + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U1(1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U1(2), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U1(-1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U1(-2), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) @@ -48,8 +51,8 @@ def u1setup(request): bc = rand((bond1, bond1), dq=U1(1), pattern="++").to_flat() Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab, inds=['a','b1'], tags=["ab1"]) - Tbc1 = FermionTensor(bc, inds=['b1','c'], tags=["bc1"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) yield @@ -57,10 +60,11 @@ def u1setup(request): def z4setup(request): bond1 = BondInfo({Z4(0):3, Z4(1): 3, Z4(3):3, Z4(2):3}) bond2 = BondInfo({Z4(0):5, Z4(1): 5, Z4(3):5, Z4(2):5}) + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z4(1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z4(3), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z4(1), pattern="-++").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z4(3), pattern="-+-").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z4(2), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z4(0), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z4(1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) @@ -71,8 +75,8 @@ def z4setup(request): bc = rand((bond1, bond1), dq=Z4(1), pattern="++").to_flat() Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab, inds=['a','b1'], tags=["ab1"]) - Tbc1 = FermionTensor(bc, inds=['b1','c'], tags=["bc1"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) yield @@ -80,10 +84,10 @@ def z4setup(request): def z2setup(request): bond1 = BondInfo({Z2(0):3, Z2(1): 3}) bond2 = BondInfo({Z2(0):5, Z2(1): 5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z2(1), pattern="+--").to_flat() + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z2(0), pattern="+--").to_flat() request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z2(1), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z2(1), pattern="-++").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z2(1), pattern="-+-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z2(1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z2(0), pattern="+-+").to_flat() request.cls.Tabc = Tabc = FermionTensor(abc, 
inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) @@ -94,12 +98,12 @@ def z2setup(request): bc = rand((bond1, bond1), dq=Z2(1), pattern="++").to_flat() Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab*1.3, inds=['a','b1'], tags=["ab1"]) - Tbc1 = FermionTensor(bc*1.5, inds=['b1','c'], tags=["bc1"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) yield -@pytest.mark.usefixtures('u1setup') +@pytest.mark.usefixtures('u11setup') class TestU11: def test_backend(self): Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) From f6aa564211f3d00176fbd9d20712ebe9b51a4686 Mon Sep 17 00:00:00 2001 From: yangcal Date: Thu, 15 Apr 2021 14:27:47 -0700 Subject: [PATCH 42/61] API adjustment and cleanup; rename modules in effort for general boson/fermion symmetry classes --- quimb/tensor/block_interface.py | 55 +++++++++++++++++ quimb/tensor/block_tools.py | 64 ++++++++++++++++++++ quimb/tensor/fermion.py | 5 +- quimb/tensor/fermion_2d.py | 11 +--- quimb/tensor/fermion_2d_tebd.py | 22 +++---- quimb/tensor/fermion_gen.py | 16 ++--- quimb/tensor/fermion_interface.py | 23 ------- quimb/tensor/test_fermion/test_fermion_2d.py | 2 +- quimb/tensor/test_fermion/test_numerics.py | 2 +- quimb/tensor/test_fermion/test_operators.py | 2 +- 10 files changed, 144 insertions(+), 58 deletions(-) create mode 100644 quimb/tensor/block_interface.py create mode 100644 quimb/tensor/block_tools.py delete mode 100644 quimb/tensor/fermion_interface.py diff --git a/quimb/tensor/block_interface.py b/quimb/tensor/block_interface.py new file mode 100644 index 00000000..8be9ee74 --- /dev/null +++ b/quimb/tensor/block_interface.py @@ -0,0 +1,55 @@ +import sys +from pyblock3.algebra.fermion_symmetry import U11, U1, Z2, Z4, Z22 +from pyblock3.algebra.symmetry import BondInfo +from pyblock3.algebra.fermion import eye, SparseFermionTensor +from pyblock3.algebra import fermion_setting as setting +from pyblock3.algebra import fermion_ops + +this = sys.modules[__name__] +this.DEFAULT_SYMMETRY = "U1" +this.USE_CPP = True +this.USE_FERMION = True +symmetry_map = setting.symmetry_map + +def set_symmetry(symmetry): + symmetry = symmetry.upper() + if symmetry not in symmetry_map: + raise KeyError("input symmetry %s not supported"%symmetry) + this.DEFAULT_SYMMETRY = symmetry + setting.set_symmetry(symmetry) + +def set_backend(use_cpp): + this.USE_CPP = use_cpp + setting.set_flat(use_cpp) + +def set_fermion(use_fermion): + this.USE_FERMION = use_fermion + setting.set_fermion(use_fermion) + +def set(**kwargs): + symmetry = kwargs.pop("symmetry", this.DEFAULT_SYMMETRY) + use_fermion = kwargs.pop("fermion", this.USE_FERMION) + use_cpp = kwargs.pop("use_cpp", this.USE_CPP) + set_symmetry(symmetry) + set_fermion(use_fermion) + set_backend(use_cpp) + +def dispatch_settings(*keys): + dict = {"symmetry": "DEFAULT_SYMMETRY", + "fermion": "USE_FERMION", + "use_cpp": "USE_CPP"} + _settings = [] + for ikey in keys: + if ikey not in dict: + raise KeyError("%s not a valid backend setting"%ikey) + _settings.append(getattr(this, dict[ikey])) + if len(_settings) == 1: + _settings = _settings[0] + return _settings + +to_exponential = fermion_ops.get_exponential +H1 = fermion_ops.H1 
+Hubbard = fermion_ops.Hubbard +onsite_U = fermion_ops.onsite_U +measure_SZ = fermion_ops.measure_SZ +ParticleNumber = fermion_ops.ParticleNumber diff --git a/quimb/tensor/block_tools.py b/quimb/tensor/block_tools.py new file mode 100644 index 00000000..995aade1 --- /dev/null +++ b/quimb/tensor/block_tools.py @@ -0,0 +1,64 @@ +from quimb.tensor import block_interface as bitf +import numpy as np + +def apply(T, func): + use_cpp = bitf.dispatch_settings("use_cpp") + if use_cpp: + new_T = T.copy() + new_T.data = func(new_T.data) + else: + new_T = T.copy() + for iblk in new_T: + iblk[:] = func(iblk[:]) + return new_T + +def sqrt(T): + _sqrt = lambda x : x**.5 + return apply(T, _sqrt) + +def inv_with_smudge(T, cutoff=1e-10, gauge_smudge=1e-6): + def _inv_with_smudge(arr): + new_arr = np.zeros_like(arr) + ind = abs(arr) > cutoff + new_arr[ind] = (arr[ind] + gauge_smudge) ** -1 + return new_arr + return apply(T, _inv_with_smudge) + +def add_with_smudge(T, cutoff=1e-10, gauge_smudge=1e-6): + def _add_with_smudge(arr): + ind = abs(arr) > cutoff + arr[ind] += gauge_smudge + return arr + return apply(T, _add_with_smudge) + +''' +bitf.set(symmetry="z22", use_cpp=True, fermion=True) +H = bitf.Hubbard(1,4,0.1) +H.data = abs(H.data) +H.data[H.data==0] = 2.0 +Hsqrt = sqrt(H) +print("sqrt") +print((Hsqrt.data**2-H.data).sum()) + +Hi = inv_with_smudge(H) +print("inv") +print(Hi.data*H.data) +Ha = add_with_smudge(H) +print("add") +print(Ha.data-H.data) + +bitf.set(symmetry="z2", use_cpp=False, fermion=True) +H = bitf.Hubbard(1,4,0.1).to_flat() +H.data = abs(H.data) +H.data[H.data==0] = 2.0 +H = H.to_sparse() +Hsqrt = sqrt(H).to_flat() +print("sqrt") +print((Hsqrt.data**2-H.to_flat().data).sum()) +Hi = inv_with_smudge(H).to_flat() +print("inv") +print(Hi.data*H.to_flat().data) +Ha = add_with_smudge(H).to_flat() +print("add") +print(Ha.data-H.to_flat().data) +''' diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index a0385632..799e6a7e 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -13,10 +13,9 @@ from .tensor_core import tensor_contract as _tensor_contract from ..utils import oset, valmap from .array_ops import asarray, ndim -from . import fermion_interface +from . import block_interface -DEFAULT_SYMMETRY = fermion_interface.DEFAULT_SYMMETRY -BondInfo = fermion_interface.BondInfo +BondInfo = block_interface.BondInfo def _contract_connected(T1, T2, output_inds=None): """Fermionic contraction of two tensors that are adjacent to each other. 
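The block_interface module introduced above is meant to be configured once per session; the other modules then read the current defaults through dispatch_settings and the re-exported operator constructors. A minimal usage sketch, assuming only the names defined in this patch (set, dispatch_settings, Hubbard, gen_mf_peps) and purely illustrative parameter values:

    import numpy as np
    import quimb.tensor.block_interface as bitf
    from quimb.tensor.fermion_gen import gen_mf_peps

    # configure the global defaults once; other modules query them via dispatch_settings
    bitf.set(symmetry="U1", fermion=True, use_cpp=True)
    print(bitf.dispatch_settings("symmetry", "use_cpp"))   # -> ['U1', True]

    # operator constructors and mean-field PEPS then follow the chosen symmetry
    H = bitf.Hubbard(1, 4, 0.1)            # same call as in the commented block_tools demo above
    psi = gen_mf_peps(np.random.randint(0, 4, (2, 2)), symmetry='u1')   # 2x2 product-state FPEPS
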
diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 7af77fa9..ff94716b 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -19,6 +19,7 @@ tags_to_oset, bonds ) +from .block_tools import inv_with_smudge from ..utils import check_opt, pairwise from collections import defaultdict from itertools import product @@ -123,10 +124,7 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, break # SVD funcs needs to be modify and make sure S has even parity for i, bix, location, s in regauged: - idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] - snew = np.zeros_like(s.data) - snew[idx] = 1/s.data[idx] - snew = s.__class__(s.q_labels, s.shapes, snew, pattern=s.pattern, idxs=s.idxs) + snew = inv_with_smudge(s, INVERSE_CUTOFF, gauge_smudge=0) t = inner_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) @@ -255,10 +253,7 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, ] for i, bix, location, s in regauged: - idx = np.where(abs(s.data)>INVERSE_CUTOFF)[0] - snew = np.zeros_like(s.data) - snew[idx] = 1/s.data[idx] - snew = s.__class__(s.q_labels, s.shapes, snew, pattern=s.pattern, idxs=s.idxs) + snew = inv_with_smudge(s, INVERSE_CUTOFF, gauge_smudge=0) t = new_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py index 358795b3..6d4fa8a3 100644 --- a/quimb/tensor/fermion_2d_tebd.py +++ b/quimb/tensor/fermion_2d_tebd.py @@ -6,11 +6,12 @@ from .tensor_2d_tebd import SimpleUpdate as _SimpleUpdate from .tensor_2d_tebd import conditioner from .tensor_2d import gen_long_range_path, nearest_neighbors -from .fermion_interface import DEFAULT_SYMMETRY, eye, to_exponential +from .block_interface import eye, to_exponential, Hubbard +from . 
import block_tools INVERSE_CUTOFF = 1e-10 -def Hubbard2D(t, u, Lx, Ly, mu=0., symmetry=DEFAULT_SYMMETRY): +def Hubbard2D(t, u, Lx, Ly, mu=0., symmetry=None): """Create a LocalHam2D object for 2D Hubbard Model Parameters @@ -30,7 +31,6 @@ def Hubbard2D(t, u, Lx, Ly, mu=0., symmetry=DEFAULT_SYMMETRY): ------- a LocalHam2D object """ - from quimb.tensor.fermion_interface import Hubbard ham = dict() count_neighbour = lambda i,j: (i>0) + (i0) + (jINVERSE_CUTOFF] += self.gauge_smudge + mult_val = block_tools.add_with_smudge(Tsval, INVERSE_CUTOFF, self.gauge_smudge) Tij.multiply_index_diagonal_( ind=bond_ind, x=mult_val, location=location) @@ -308,8 +307,7 @@ def env_neighbours(i, j): else: raise KeyError("gauge not found") loca, locb = _get_location(Ta, Tb) - mult_val = Tsval.copy() - mult_val.data = Tsval.data ** .5 + mult_val = block_tools.sqrt(Tsval) bnd = self._psi.bond(site_a, site_b) Ta.multiply_index_diagonal_(ind=bnd, x=mult_val, location=loca) Tb.multiply_index_diagonal_(ind=bnd, x=mult_val, location=locb) @@ -327,8 +325,7 @@ def env_neighbours(i, j): bond_pair = (site_b, site_a) s = info['singular_values', bond_pair] if self.gauge_renorm: - # keep the singular values from blowing up - s = s / np.sum(s.data**2) ** 0.5 + s = s / s.norm() if bond_pair not in self.gauges: del self.gauges[(bond_pair[1], bond_pair[0])] @@ -346,9 +343,7 @@ def env_neighbours(i, j): else: raise KeyError("gauge not found") bnd = self._psi.bond(site, neighbour) - mult_val = Tsval.copy() - non_zero_ind = abs(mult_val.data)>INVERSE_CUTOFF - mult_val.data[non_zero_ind] = (mult_val.data[non_zero_ind] + self.gauge_smudge) ** -1 + mult_val = block_tools.inv_with_smudge(Tsval, INVERSE_CUTOFF, self.gauge_smudge) location = _get_location(Tij, self._psi[neighbour])[0] Tij.multiply_index_diagonal_( ind=bnd, x=mult_val, location=location) @@ -369,8 +364,7 @@ def get_state(self, absorb_gauges=True): Ta = psi[ija] Tb = psi[ijb] loca, locb = _get_location(Ta, Tb) - mult_val = Tsval.copy() - mult_val.data = Tsval.data ** .5 + mult_val = block_tools.sqrt(Tsval) Ta.multiply_index_diagonal_(bnd, mult_val, location=loca) Tb.multiply_index_diagonal_(bnd, mult_val, location=locb) diff --git a/quimb/tensor/fermion_gen.py b/quimb/tensor/fermion_gen.py index 0b0ff9ef..e44ca6ee 100644 --- a/quimb/tensor/fermion_gen.py +++ b/quimb/tensor/fermion_gen.py @@ -1,15 +1,15 @@ +import numpy as np from pyblock3.algebra.core import SubTensor from pyblock3.algebra.fermion import SparseFermionTensor from pyblock3.algebra import fermion_encoding -from quimb.tensor.fermion_interface import DEFAULT_SYMMETRY +import quimb.tensor.block_interface as bitf from quimb.tensor.fermion_2d import FPEPS -import numpy as np -from itertools import product pattern_map = {"d": "+", "l":"+", "p":"+", "u": "-", "r":"-"} -def _gen_site_tsr(state, pattern=None, ndim=2, ax=0, symmetry=DEFAULT_SYMMETRY): +def _gen_site_tsr(state, pattern=None, ndim=2, ax=0, symmetry=None): + if symmetry is None: symmetry = bitf.DEFAULT_SYMMETRY state_map = fermion_encoding.get_state_map(symmetry) if state not in state_map: raise KeyError("requested state not recoginized") @@ -21,11 +21,13 @@ def _gen_site_tsr(state, pattern=None, ndim=2, ax=0, symmetry=DEFAULT_SYMMETRY): ind = (0,)* ax + (ind,) + (0,) * (ndim-ax-1) dat[ind] = 1 blocks = [SubTensor(reduced=dat, q_labels=q_label)] - T = SparseFermionTensor(blocks=blocks, pattern=pattern).to_flat() + T = SparseFermionTensor(blocks=blocks, pattern=pattern) + if bitf.USE_CPP: + T = T.to_flat() return T -def gen_mf_peps(state_array, 
shape='urdlp', symmetry=DEFAULT_SYMMETRY, **kwargs): - +def gen_mf_peps(state_array, shape='urdlp', symmetry=None, **kwargs): + if symmetry is None: symmetry = bitf.DEFAULT_SYMMETRY Lx, Ly = state_array.shape arr = state_array.astype("int") cache = dict() diff --git a/quimb/tensor/fermion_interface.py b/quimb/tensor/fermion_interface.py deleted file mode 100644 index 6b69c82c..00000000 --- a/quimb/tensor/fermion_interface.py +++ /dev/null @@ -1,23 +0,0 @@ -from pyblock3.algebra.fermion_symmetry import U11, U1, Z2, Z4 -from pyblock3.algebra.symmetry import BondInfo -from pyblock3.algebra.fermion import eye, SparseFermionTensor -from pyblock3.algebra import fermion_setting as setting -from pyblock3.algebra import fermion_ops - -DEFAULT_SYMMETRY = U11 -symmetry_map = setting.symmetry_map - -def set_symmetry(symmetry_string): - global DEFAULT_SYMMETRY - symmetry_string = symmetry_string.upper() - if symmetry_string not in symmetry_map: - raise KeyError("input symmetry %s not supported"%symmetry_string) - DEFAULT_SYMMETRY = symmetry_map[symmetry_string] - setting.set_symmetry(symmetry_string) - -to_exponential = fermion_ops.get_flat_exponential -H1 = fermion_ops.H1 -Hubbard = fermion_ops.Hubbard -onsite_U = fermion_ops.onsite_U -measure_SZ = fermion_ops.measure_SZ -ParticleNumber = fermion_ops.ParticleNumber diff --git a/quimb/tensor/test_fermion/test_fermion_2d.py b/quimb/tensor/test_fermion/test_fermion_2d.py index 841cd890..2eaddb36 100644 --- a/quimb/tensor/test_fermion/test_fermion_2d.py +++ b/quimb/tensor/test_fermion/test_fermion_2d.py @@ -1,7 +1,7 @@ import pytest import numpy as np import itertools -from quimb.tensor.fermion_interface import BondInfo, SparseFermionTensor, U11, U1, Z4, Z2 +from quimb.tensor.block_interface import BondInfo, SparseFermionTensor, U11, U1, Z4, Z2 from quimb.tensor.fermion_gen import gen_mf_peps @pytest.fixture(scope='class') diff --git a/quimb/tensor/test_fermion/test_numerics.py b/quimb/tensor/test_fermion/test_numerics.py index 19321784..baada7f5 100644 --- a/quimb/tensor/test_fermion/test_numerics.py +++ b/quimb/tensor/test_fermion/test_numerics.py @@ -2,7 +2,7 @@ import numpy as np from quimb.tensor.fermion import ( FermionTensor, FermionTensorNetwork, tensor_contract) -from quimb.tensor.fermion_interface import BondInfo, U11, U1, Z2, Z4 +from quimb.tensor.block_interface import BondInfo, U11, U1, Z2, Z4 from pyblock3.algebra.fermion import SparseFermionTensor rand = SparseFermionTensor.random diff --git a/quimb/tensor/test_fermion/test_operators.py b/quimb/tensor/test_fermion/test_operators.py index d2d99835..aa3a8785 100644 --- a/quimb/tensor/test_fermion/test_operators.py +++ b/quimb/tensor/test_fermion/test_operators.py @@ -4,7 +4,7 @@ from quimb.tensor.fermion_2d import FPEPS from pyblock3.algebra.core import SubTensor from pyblock3.algebra import fermion_ops -from quimb.tensor.fermion_interface import U11, U1, Z4, Z2, SparseFermionTensor +from quimb.tensor.block_interface import U11, U1, Z4, Z2, SparseFermionTensor from quimb.tensor.fermion_gen import gen_mf_peps @pytest.fixture(scope='class') From ea958023bf3658ec7c28dc3de9adca9ebb13a2a7 Mon Sep 17 00:00:00 2001 From: yangcal Date: Mon, 26 Apr 2021 12:19:34 -0700 Subject: [PATCH 43/61] cleanup and revert tensor_2d --- quimb/tensor/block_tools.py | 57 ++++++++++++++++++------------------- quimb/tensor/tensor_2d.py | 24 +++------------- quimb/tensor/tensor_core.py | 18 ++++++------ 3 files changed, 40 insertions(+), 59 deletions(-) diff --git a/quimb/tensor/block_tools.py 
b/quimb/tensor/block_tools.py index 995aade1..735589ad 100644 --- a/quimb/tensor/block_tools.py +++ b/quimb/tensor/block_tools.py @@ -31,34 +31,33 @@ def _add_with_smudge(arr): return arr return apply(T, _add_with_smudge) -''' -bitf.set(symmetry="z22", use_cpp=True, fermion=True) -H = bitf.Hubbard(1,4,0.1) -H.data = abs(H.data) -H.data[H.data==0] = 2.0 -Hsqrt = sqrt(H) -print("sqrt") -print((Hsqrt.data**2-H.data).sum()) +def get_smudge_balance(T1, T2, ix, smudge): + flat = bitf.dispatch_settings("use_cpp") + if flat: + t1, t2 = T1.data.to_sparse(), T2.data.to_sparse() + else: + t1, t2 = T1.data, T2.data + sign1 = t1.pattern[T1.inds.index(ix)] + sign2 = t2.pattern[T2.inds.index(ix)] + s1_pattern = {"+":"-+", "-":"+-"}[sign1] + s2_pattern = {"-":"-+", "+":"+-"}[sign2] -Hi = inv_with_smudge(H) -print("inv") -print(Hi.data*H.data) -Ha = add_with_smudge(H) -print("add") -print(Ha.data-H.data) + inv = (sign1 == sign2) + block_cls = t1.blocks[0].__class__ + block_dict = {} + for iblk1 in t1: + q0 = iblk1.q_labels[0] + block_dict[q0] = np.diag(np.asarray(iblk1)) + smudge + for iblk2 in t2: + q0 = -iblk2.q_labels[0] if inv else iblk2.q_labels[0] + if q0 not in block_dict: continue + block_dict[q0] = block_dict[q0] / (np.diag(np.asarray(iblk2)) + smudge) -bitf.set(symmetry="z2", use_cpp=False, fermion=True) -H = bitf.Hubbard(1,4,0.1).to_flat() -H.data = abs(H.data) -H.data[H.data==0] = 2.0 -H = H.to_sparse() -Hsqrt = sqrt(H).to_flat() -print("sqrt") -print((Hsqrt.data**2-H.to_flat().data).sum()) -Hi = inv_with_smudge(H).to_flat() -print("inv") -print(Hi.data*H.to_flat().data) -Ha = add_with_smudge(H).to_flat() -print("add") -print(Ha.data-H.to_flat().data) -''' + s1 = [block_cls(reduced=np.diag(s**-0.25), q_labels=(qlab,)*2) for qlab, s in block_dict.items()] + s2 = [block_cls(reduced=np.diag(s** 0.25), q_labels=(qlab,)*2) for qlab, s in block_dict.items()] + s1 = t1.__class__(blocks=s1, pattern=s1_pattern) + s2 = t2.__class__(blocks=s2, pattern=s2_pattern) + if flat: + s1 = s1.to_flat() + s2 = s2.to_flat() + return s1, s2 diff --git a/quimb/tensor/tensor_2d.py b/quimb/tensor/tensor_2d.py index 1ec6ee2c..535a5bce 100644 --- a/quimb/tensor/tensor_2d.py +++ b/quimb/tensor/tensor_2d.py @@ -708,7 +708,7 @@ def _contract_boundary_from_bottom_single( # │ │ │ │ │ # ●══●══<══<══< # - self.canonize_row(i, sweep=canonize_sweep, yrange=yrange, max_bond=max_bond, cutoff=cutoff) + self.canonize_row(i, sweep=canonize_sweep, yrange=yrange) # # │ │ │ │ │ --> │ │ │ │ │ --> │ │ │ │ │ @@ -870,7 +870,7 @@ def _contract_boundary_from_top_single( # ●══●══<══<══< # | | | | | # - self.canonize_row(i, sweep=canonize_sweep, yrange=yrange, max_bond=max_bond, cutoff=cutoff) + self.canonize_row(i, sweep=canonize_sweep, yrange=yrange) # # >──●══●══●══● --> >──>──●══●══● --> >──>──>──●══● # | | | | | --> | | | | | --> | | | | | @@ -1035,7 +1035,7 @@ def _contract_boundary_from_left_single( # ║ ║ # ●── ●── # - self.canonize_column(j, sweep=canonize_sweep, xrange=xrange, max_bond=max_bond, cutoff=cutoff) + self.canonize_column(j, sweep=canonize_sweep, xrange=xrange) # # v── ●── # ║ │ @@ -1208,7 +1208,7 @@ def _contract_boundary_from_right_single( # ║ ║ # ──● ──● # - self.canonize_column(j, sweep=canonize_sweep, xrange=xrange, max_bond=max_bond, cutoff=cutoff) + self.canonize_column(j, sweep=canonize_sweep, xrange=xrange) # # ──v ──● # ║ │ @@ -1607,10 +1607,6 @@ def compute_row_environments( env_bottom = self.copy() if dense: env_bottom ^= first_row - else: - for j in range(self.Ly): - env_bottom ^= self.site_tag(0, j) - 
env_bottom.compress_row(0, sweep="right", compress_opts=compress_opts) row_envs['below', 1] = env_bottom.select(first_row) for i in range(2, env_bottom.Lx): @@ -1626,10 +1622,6 @@ def compute_row_environments( env_top = self.copy() if dense: env_top ^= last_row - else: - for j in range(self.Ly): - env_top ^= self.site_tag(self.Lx-1, j) - env_top.compress_row(self.Lx-1, sweep="right", compress_opts=compress_opts) row_envs['above', self.Lx - 2] = env_top.select(last_row) for i in range(env_top.Lx - 3, -1, -1): @@ -1746,10 +1738,6 @@ def compute_col_environments( env_right = self.copy() if dense: env_right ^= first_column - else: - for i in range(self.Lx): - env_right ^= self.site_tag(i, 0) - env_right.compress_column(0, sweep="up", compress_opts=compress_opts) col_envs['left', 1] = env_right.select(first_column) for j in range(2, env_right.Ly): @@ -1765,10 +1753,6 @@ def compute_col_environments( env_left = self.copy() if dense: env_left ^= last_column - else: - for i in range(self.Lx): - env_left ^= self.site_tag(i, self.Ly-1) - env_left.compress_column(self.Ly-1, sweep="up", compress_opts=compress_opts) col_envs['right', self.Ly - 2] = env_left.select(last_column) for j in range(self.Ly - 3, -1, -1): diff --git a/quimb/tensor/tensor_core.py b/quimb/tensor/tensor_core.py index 8d548ac7..b1685e1e 100644 --- a/quimb/tensor/tensor_core.py +++ b/quimb/tensor/tensor_core.py @@ -1580,7 +1580,7 @@ def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): self._owners = dict() # Short circuit for copying Tensors - if isinstance(data, Tensor): + if isinstance(data, self.__class__): self._data = data.data self._inds = data.inds self._tags = data.tags.copy() @@ -1609,7 +1609,7 @@ def copy(self, deep=False): if deep: return copy.deepcopy(self) else: - return Tensor(self, None) + return self.__class__(self, None) __copy__ = copy @@ -2639,7 +2639,7 @@ class TensorNetwork(object): def __init__(self, ts, *, virtual=False, check_collisions=True): # short-circuit for copying TensorNetworks - if isinstance(ts, TensorNetwork): + if isinstance(ts, self.__class__): self.tag_map = valmap(lambda tids: tids.copy(), ts.tag_map) self.ind_map = valmap(lambda tids: tids.copy(), ts.ind_map) self.tensor_map = dict() @@ -2667,15 +2667,13 @@ def __and__(self, other): """Combine this tensor network with more tensors, without contracting. Copies the tensors. """ - return TensorNetwork((self, other)) + return self.__class__((self, other)) def __or__(self, other): """Combine this tensor network with more tensors, without contracting. Views the constituent tensors. 
""" - return TensorNetwork((self, other), virtual=True) - - _EXTRA_PROPS = () + return self.__class__((self, other), virtual=True) @classmethod def from_TN(cls, tn, like=None, inplace=False, **kwargs): @@ -4249,8 +4247,8 @@ def _contract_compressed_tid_sequence( # contract them t_new = t1 @ t2 - if not isinstance(t_new, Tensor): - t_new = Tensor(t_new, tags=t1.tags | t2.tags) + if not isinstance(t_new, t1.__class__): + t_new = t1.__class__(t_new, tags=t1.tags | t2.tags) if info is not None: largest_intermediate = max(largest_intermediate, t_new.size) @@ -4569,7 +4567,7 @@ def insert_operator(self, A, where1, where2, tags=None, inplace=False): # reindex one tensor, and add a new A tensor joining the bonds nbnd = rand_uuid() T2.reindex_({bnd: nbnd}) - TA = Tensor(A, inds=(bnd, nbnd), tags=tags) + TA = A.__class__(A, inds=(bnd, nbnd), tags=tags) tn |= TA return tn From 07117fdb98108b3daccc0c24ceac5ea8a5083a3a Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 28 Apr 2021 15:43:39 -0700 Subject: [PATCH 44/61] refactor fermion module; tensor_block module added for general symmetry --- quimb/tensor/fermion.py | 1629 +++++--------------- quimb/tensor/fermion_2d.py | 1176 +++++++------- quimb/tensor/fermion_gen.py | 16 +- quimb/tensor/tensor_block.py | 583 +++++++ quimb/tensor/test/test_block_numerics.py | 210 +++ quimb/tensor/test/test_fermion_2d.py | 247 +++ quimb/tensor/test/test_fermion_numerics.py | 210 +++ 7 files changed, 2278 insertions(+), 1793 deletions(-) create mode 100644 quimb/tensor/tensor_block.py create mode 100644 quimb/tensor/test/test_block_numerics.py create mode 100644 quimb/tensor/test/test_fermion_2d.py create mode 100644 quimb/tensor/test/test_fermion_numerics.py diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 799e6a7e..6916d5fa 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -1,555 +1,62 @@ -"""Core Fermionic TensorNetwork Module -Note: The position of Fermionic Tensors inside FermionSpace - is defined as the its distance to the ket vacuum, eg, - for |psi> = \hat{Tx} \hat{Ty} \hat{Tz} |0>, - we have the position for these tensors as - Tx:2 Ty:1 Tz:0 +"""Core tensor network tools. """ -import numpy as np -import weakref +import os +import copy import functools -from .tensor_core import (Tensor, TensorNetwork, rand_uuid, tags_to_oset, - _parse_split_opts, check_opt, _VALID_SPLIT_GET) -from .tensor_core import tensor_contract as _tensor_contract -from ..utils import oset, valmap -from .array_ops import asarray, ndim -from . import block_interface - -BondInfo = block_interface.BondInfo - -def _contract_connected(T1, T2, output_inds=None): - """Fermionic contraction of two tensors that are adjacent to each other. - Any shared indexes will be summed over. If the input fermionic tensors - do not belong to the same FermionSpace, the first tensor is assumed to - placed after the second tensor, eg \hat{T1} \hat{T2} - - Parameters - ---------- - T1 : FermionTensor - The first tensor. - T2 : FermionTensor - The second tensor, with matching indices and dimensions to ``T1``. - output_inds : sequence of str - If given, the desired order of output indices, else defaults to the - order they occur in the input indices. 
- - Returns - ------- - scalar or FermionTensor - """ - info1 = T1.get_fermion_info() - info2 = T2.get_fermion_info() - t1, t2 = T1, T2 - if info1 is not None and info2 is not None: - site1, site2 = info1[1], info2[1] - if abs(site1-site2) != 1: - raise ValueError("T1 and T2 not adjacently connected in FermionSpace") - if site1 < site2: - # if T1 is placed before T2, - # it shall be parsed as second input to tensordot backend - t1, t2 = T2, T1 - ainds, binds = t1.inds, t2.inds - _output_inds = [] - ax_a, ax_b = [], [] - for kia, ia in enumerate(ainds): - if ia not in binds: - _output_inds.append(ia) - else: - ax_a.append(kia) - ax_b.append(binds.index(ia)) - for kib, ib in enumerate(binds): - if ib not in ainds: - _output_inds.append(ib) - if output_inds is None: output_inds = _output_inds - if set(_output_inds) != set(output_inds): - raise TypeError("specified out_inds not allowed in tensordot, \ - make sure no summation/Hadamard product appears") - - out = np.tensordot(t1.data, t2.data, axes=[ax_a, ax_b]) - - if len(output_inds)==0: - return out.data[0] - - if output_inds!=_output_inds: - transpose_order = tuple([_output_inds.index(ia) for ia in output_inds]) - out = out.transpose(transpose_order) - o_tags = oset.union(*(T1.tags, T2.tags)) - out = FermionTensor(out, inds=output_inds, tags=o_tags) - return out - -def _contract_pairs(fs, tid_or_site1, tid_or_site2, output_inds=None, direction='left'): - """ Perform pairwise contraction for two tensors in a specified fermion space. - If the two tensors are not adjacent, move one of the tensors in the given direction. - Note this could alter the tensors that are in between the two tensors in the fermion space - - Parameters - ---------- - fs : FermionSpace obj - the FermionSpace obj that contains the two tensors - tid_or_site1: a string or an integer - The string that specifies the id for the first tensor or the site for the first tensor - tid_or_site2: a string or an integer - The string that specifies the id for the 2nd tensor or the site for the 2nd tensor - output_inds: a list of strings - The list that specifies the output indices and its order - direction: string "left" or "right" - The direction to move tensors if the two tensors are not adjacent - - Returns - ------- - scalar or FermionTensor - """ - tid1, site1, tsr1 = fs[tid_or_site1] - tid2, site2, tsr2 = fs[tid_or_site2] - - if not fs.is_adjacent(tid1, tid2): - fs.make_adjacent(tid1, tid2, direction) - - if direction=="left": - site1 = min(site1, site2) - else: - site1 = max(site1, site2) - 1 - site2 = site1 + 1 - return _contract_connected(tsr1, tsr2, output_inds) - -def _fetch_fermion_space(*tensors, inplace=True): - """ Retrieve the FermionSpace and the associated tensor_ids for the tensors. - If the given tensors all belong to the same FermionSpace object (fsobj), - the underlying fsobj will be returned. Otherwise, a new FermionSpace will be created, - and the tensors will be placed in the same order as the input tensors. - - Parameters - ---------- - tensors : a tuple or list of FermionTensors - input_tensors - inplace: bool - if not true, a new FermionSpace will be created with all tensors copied. - so subsequent operations on the fsobj will not alter the input tensors. 
+import numpy as np - Returns - ------- - fs : a FermionSpace object - tid_lst: a list of strings for the tensor_ids - """ - if isinstance(tensors, (FermionTensor, FermionTensorNetwork)): - tensors = (tensors, ) +from ..utils import (check_opt, oset, valmap) +from .drawing import draw_tn - if is_mergeable(*tensors): - if isinstance(tensors[0], FermionTensor): - fs = tensors[0].fermion_owner[1]() +from .tensor_core import Tensor, TensorNetwork, _parse_split_opts, oset_union, tags_to_oset, rand_uuid, _parse_split_opts +from .tensor_core import tensor_contract as _tensor_contract +from .tensor_block import tensor_split as _tensor_split +from .tensor_block import _core_contract, tensor_canonize_bond, tensor_compress_bond, BlockTensor, BlockTensorNetwork +from .block_tools import apply, get_smudge_balance +from .block_interface import dispatch_settings +from functools import wraps + +def contract_decorator(fn): + @wraps(fn) + def wrapper(T1, T2, *args, **kwargs): + tid1, site1 = T1.get_fermion_info() + tid2, site2 = T2.get_fermion_info() + fs = T1.fermion_owner[0] + if site1 > site2: + fs.move(tid1, site2+1) + out = fn(T1, T2, *args, **kwargs) else: - fs = tensors[0].fermion_space - if not inplace: - fs = fs.copy() - tid_lst = [] - for tsr_or_tn in tensors: - if isinstance(tsr_or_tn, FermionTensor): - tid_lst.append(tsr_or_tn.get_fermion_info()[0]) - else: - tid_lst.append(tsr_or_tn.tensor_map.keys()) - else: - fs = FermionSpace() - for tsr_or_tn in tensors: - if isinstance(tsr_or_tn, FermionTensor): - fs.add_tensor(tsr_or_tn, virtual=inplace) - elif isinstance(tsr_or_tn, FermionTensorNetwork): - if not tsr_or_tn.is_continuous(): - raise ValueError("Input Network not continous, merge not allowed") - for itsr in tsr_or_tn: - fs.add_tensor(itsr, virtual=inplace) - tid_lst = list(fs.tensor_order.keys()) - return fs, tid_lst - -def tensor_contract(*tensors, output_inds=None, - direction="left", inplace=False, **contract_opts): - """ Perform tensor contractions for all given tensors. - If input tensors do not belong to the same underlying fsobj, - the position of each tensor will be the same as its order in the input tensor tuple/list. - Summation and Hadamard product not supported as it's not well defined for fermionic tensors - - Parameters - ---------- - tensors : a tuple or list of FermionTensors - input tensors - output_inds: a list of strings - direction: string "left" or "right" - The direction to move tensors if the two tensors are not adjacent - inplace: bool, optional - whether to move/contract tensors in place. 
- - Returns - ------- - out : a FermionTensor object or a number - """ - path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) - fs, tid_lst = _fetch_fermion_space(*tensors, inplace=inplace) - for conc in path_info.contraction_list: - pos1, pos2 = conc[0] - tid1 = tid_lst.pop(pos1) - tid2 = tid_lst.pop(pos2) - site1 = fs[tid1][1] - site2 = fs[tid2][1] - out = fs._contract_pairs(site1, site2, direction=direction, inplace=True) + fs.move(tid2, site1+1) + out = fn(T2, T1, *args, **kwargs) if not isinstance(out, (float, complex)): - tid_lst.append(out.fermion_owner[2]) - - if not isinstance(out, (float, complex)): - _output_inds = out.inds - if output_inds is None: - output_inds = _output_inds - else: - output_inds = tuple(output_inds) - if set(_output_inds) != set(output_inds): - raise TypeError("specified out_inds not allow in tensordot, \ - make sure not summation/Hadamard product appears") - if output_inds!=_output_inds: - out = out.transpose(*output_inds, inplace=True) - return out - -def tensor_split( - T, - left_inds, - method='svd', - get=None, - absorb='both', - max_bond=None, - cutoff=1e-10, - cutoff_mode='rel', - renorm=None, - ltags=None, - rtags=None, - stags=None, - bond_ind=None, - right_inds=None, - qpn_info=None -): - """Decompose this Fermionic tensor into two fermionic tensors. - - Parameters - ---------- - T : FermionTensor - The fermionic tensor to split. - left_inds : str or sequence of str - The index or sequence of inds, which ``T`` should already have, to - split to the 'left'. You can supply ``None`` here if you supply - ``right_inds`` instead. - method : str, optional - How to split the tensor, only some methods allow bond truncation: - - - ``'svd'``: full SVD, allows truncation. - - get : {None, 'arrays', 'tensors', 'values'} - If given, what to return instead of a TN describing the split: - - - ``None``: a tensor network of the two (or three) tensors. - - ``'arrays'``: the raw data arrays (pyblock3.algebra.fermion.FlatFermionTensor) as - a tuple ``(l, r)`` or ``(l, s, r)`` depending on ``absorb``. - - ``'tensors '``: the new tensors as a tuple ``(Tl, Tr)`` or - ``(Tl, Ts, Tr)`` depending on ``absorb``. - - ``'values'``: only compute and return the singular values ``s``. - - absorb : {'both', 'left', 'right', None}, optional - Whether to absorb the singular values into both, the left, or the right - unitary matrix respectively, or neither. If neither (``absorb=None``) - then the singular values will be returned separately as a 2D FermionTensor. - If ``get='tensors'`` or ``get='arrays'`` then a tuple like - ``(left, s, right)`` is returned. - max_bond : None or int - If integer, the maxmimum number of singular values to keep, regardless - of ``cutoff``. - cutoff : float, optional - The threshold below which to discard singular values, only applies to - rank revealing methods (not QR, LQ, or cholesky). - cutoff_mode : {'sum2', 'rel', 'abs', 'rsum2'} - Method with which to apply the cutoff threshold: - - - ``'rel'``: values less than ``cutoff * s[0]`` discarded. - - ``'abs'``: values less than ``cutoff`` discarded. - - ``'sum2'``: sum squared of values discarded must be ``< cutoff``. - - ``'rsum2'``: sum squared of values discarded must be less than - ``cutoff`` times the total sum of squared values. - - ``'sum1'``: sum values discarded must be ``< cutoff``. - - ``'rsum1'``: sum of values discarded must be less than - ``cutoff`` times the total sum of values. 
- - renorm : {None, bool, or int}, optional - Whether to renormalize the kept singular values, assuming the bond has - a canonical environment, corresponding to maintaining the Frobenius - norm or trace. If ``None`` (the default) then this is automatically - turned on only for ``cutoff_method in {'sum2', 'rsum2', 'sum1', - 'rsum1'}`` with ``method in {'svd', 'eig', 'eigh'}``. - ltags : sequence of str, optional - Add these new tags to the left tensor. - rtags : sequence of str, optional - Add these new tags to the right tensor. - stags : sequence of str, optional - Add these new tags to the singular value tensor. - bond_ind : str, optional - Explicitly name the new bond, else a random one will be generated. - right_inds : sequence of str, optional - Explicitly give the right indices, otherwise they will be worked out. - This is a minor performance feature. - - Returns - ------- - FermionTensorNetwork or tuple[FermionTensor] or tuple[array] or 1D-array - Depending on if ``get`` is ``None``, ``'tensors'``, ``'arrays'``, or - ``'values'``. In the first three cases, if ``absorb`` is set, then the - returned objects correspond to ``(left, right)`` whereas if - ``absorb=None`` the returned objects correspond to - ``(left, singular_values, right)``. - """ - check_opt('get', get, _VALID_SPLIT_GET) - - if left_inds is None: - left_inds = oset(T.inds) - oset(right_inds) - else: - left_inds = tags_to_oset(left_inds) - - if right_inds is None: - right_inds = oset(T.inds) - oset(left_inds) - - opts = _parse_split_opts( - method, cutoff, absorb, max_bond, cutoff_mode, renorm) - _left_inds = [T.inds.index(i) for i in left_inds] - _right_inds =[T.inds.index(i) for i in right_inds] - - if method == "svd": - left, s, right = T.data.tensor_svd(_left_inds, right_idx=_right_inds, qpn_info=qpn_info, **opts) - else: - raise NotImplementedError - - if get == 'arrays': - if absorb is None: - return left, s, right - return left, right - - ltags = T.tags | tags_to_oset(ltags) - rtags = T.tags | tags_to_oset(rtags) - if bond_ind is None: - if absorb is None: - bond_ind = (rand_uuid(),) * 2 + fs.replace_tensor(min(site1, site2), out, virtual=True) + fs.remove_tensor(min(site1, site2)+1) + return out + return wrapper + +_core_contract = contract_decorator(_core_contract) + +def compress_decorator(fn): + @wraps(fn) + def wrapper(T1, T2, *args, **kwargs): + tid1, site1 = T1.get_fermion_info() + tid2, site2 = T2.get_fermion_info() + fs = T1.fermion_owner[0] + loc_dict = {tid1: site1, tid2: site2} + if site1 > site2: + fs.move(tid1, site2+1) else: - bond_ind = (rand_uuid(),) - elif isinstance(bond_ind, str): - bond_ind = (bond_ind,) * 2 - - Tl = FermionTensor(data=left, inds=(*left_inds, bond_ind[0]), tags=ltags) - Tr = FermionTensor(data=right, inds=(bond_ind[-1], *right_inds), tags=rtags) - - if absorb is None: - stags = T.tags | tags_to_oset(stags) - Ts = FermionTensor(data=s, inds=bond_ind, tags=stags) - tensors = (Tl, Ts, Tr) - else: - tensors = (Tl, Tr) - - if get == 'tensors': - return tensors - - return FermionTensorNetwork(tensors[::-1], check_collisions=False, virtual=True) - -def _compress_connected(Tl, Tr, absorb='both', **compress_opts): - """Compression of two Fermionic tensors that are adjacent to each other. - - Parameters - ---------- - Tl : FermionTensor - The left tensor. - Tr : FermionTensor - The right tensor, with matching indices and dimensions to ``T1``. - absorb : {'both', 'left', 'right', None}, optional - Where to absorb the singular values after decomposition. 
- compress_opts : - Supplied to :func:`~quimb.tensor.fermion.tensor_split`. - - Returns - ------- - two fermionic Tensors - """ - - if Tl.inds == Tr.inds: - return Tl, Tr - left_inds = [ind for ind in Tl.inds if ind not in Tr.inds] - right_inds = [ind for ind in Tr.inds if ind not in Tl.inds] - out = _contract_connected(Tl, Tr) - qpn_info = (Tl.data.dq, Tr.data.dq) - if Tl.get_fermion_info()[1] < Tr.get_fermion_info()[1]: - if absorb == "left": - absorb = "right" - elif absorb == "right": - absorb = "left" - r, l = out.split(left_inds=right_inds, right_inds=left_inds, - absorb=absorb, get="tensors", qpn_info=qpn_info, **compress_opts) - else: - l, r = out.split(left_inds=left_inds, right_inds=right_inds, - absorb=absorb, get="tensors", qpn_info=qpn_info, **compress_opts) - return l, r - -def tensor_compress_bond( - T1, - T2, - absorb='both', - inplace=True, - info=None, - **compress_opts -): - """compress between the two single fermionic tensors. - - Parameters - ---------- - T1 : FermionTensor - The left tensor. - T2 : FermionTensor - The right tensor. - absorb : {'both', 'left', 'right', None}, optional - Where to absorb the singular values after decomposition. - info : None or dict, optional - A dict for returning extra information such as the singular values. - compress_opts : - Supplied to :func:`~quimb.tensor.fermion.tensor_split`. - """ - fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=inplace) - site1, site2 = fs[tid1][1], fs[tid2][1] - fs.make_adjacent(tid1, tid2) - l, r = _compress_connected(T1, T2, absorb, **compress_opts) - T1.modify(data=l.data, inds=l.inds) - T2.modify(data=r.data, inds=r.inds) - tid_map = {tid1: site1, tid2:site2} - fs._reorder_from_dict(tid_map) - return T1, T2 - -def _canonize_connected(T1, T2, absorb='right', **split_opts): - """Compression of two Fermionic tensors that are adjacent to each other. - - Parameters - ---------- - T1 : FermionTensor - The left tensor. - T2 : FermionTensor - The right tensor, with matching indices and dimensions to ``T1``. - absorb : {'both', 'left', 'right', None}, optional - Where to absorb the singular values after decomposition. - split_opts : - Supplied to :func:`~quimb.tensor.fermion.tensor_split`. - - Returns - ------- - two fermionic Tensors - """ - if absorb == 'both': - return _compress_connected(T1, T2, absorb=absorb, **split_opts) - if absorb == "left": - T1, T2 = T2, T1 - - shared_ix, left_env_ix = T1.filter_bonds(T2) - if not shared_ix: - raise ValueError("The tensors specified don't share an bond.") - - if T1.get_fermion_info()[1] < T2.get_fermion_info()[1]: - qpn_info = (T1.data.dq.__class__(0), T1.data.dq) - tRfact, new_T1 = T1.split(shared_ix, get="tensors", qpn_info=qpn_info, **split_opts) - new_T2 = _contract_connected(T2, tRfact) - else: - qpn_info = (T1.data.dq, T1.data.dq.__class__(0)) - new_T1, tRfact = T1.split(left_env_ix, get='tensors', qpn_info=qpn_info, **split_opts) - new_T2 = _contract_connected(tRfact, T2) - - if absorb == "left": - return new_T2, new_T1 - else: - return new_T1, new_T2 - -def tensor_canonize_bond(T1, T2, absorb='right', **split_opts): - r"""Inplace 'canonization' of two fermionic tensors. This gauges the bond - between the two such that ``T1`` is isometric - - Parameters - ---------- - T1 : FermionTensor - The tensor to be isometrized. - T2 : FermionTensor - The tensor to absorb the R-factor into. - split_opts - Supplied to :func:`~quimb.tensor.fermion.tensor_split`, with - modified defaults of ``method=='svd'`` and ``absorb='right'``. 
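# Illustrative sketch, plain NumPy: the contract-then-resplit idea behind the
# bond compression above, for two matrices sharing a single bond.  ``max_bond``
# is the kept bond dimension; symmetry blocks, quantum-number bookkeeping and
# fermionic reordering are deliberately ignored here.
import numpy as np

def compress_bond(A, B, max_bond):
    C = A @ B                                     # contract the shared bond away
    U, s, Vh = np.linalg.svd(C, full_matrices=False)
    k = min(max_bond, s.size)
    A_new = U[:, :k] * np.sqrt(s[:k])             # absorb='both' style split
    B_new = np.sqrt(s[:k])[:, None] * Vh[:k]
    return A_new, B_new

A = np.random.randn(8, 20)
B = np.random.randn(20, 8)
A2, B2 = compress_bond(A, B, max_bond=8)          # rank(A @ B) <= 8, so exact here
print(A2.shape, B2.shape, np.allclose(A2 @ B2, A @ B))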
- """ - check_opt('absorb', absorb, ('left', 'both', 'right')) - - if absorb == 'both': - return tensor_compress_bond(T1, T2, absorb=absorb, **split_opts) - - fs, (tid1, tid2) = _fetch_fermion_space(T1, T2, inplace=True) - site1, site2 = fs[tid1][1], fs[tid2][1] - - fs.make_adjacent(tid1, tid2) - l, r = _canonize_connected(T1, T2, absorb, **split_opts) - T1.modify(data=l.data, inds=l.inds) - T2.modify(data=r.data, inds=r.inds) - tid_map = {tid1: site1, tid2:site2} - fs._reorder_from_dict(tid_map) + fs.move(tid1, site2) + fn(T1, T2, *args, **kwargs) + fs._reorder_from_dict(loc_dict) + return T1, T2 + return wrapper -def tensor_balance_bond(t1, t2, smudge=1e-6): - """Gauge the bond between two tensors such that the norm of the 'columns' - of the tensors on each side is the same for each index of the bond. - - Parameters - ---------- - t1 : FermionTensor - The first tensor, should share a single index with ``t2``. - t2 : FermionTensor - The second tensor, should share a single index with ``t1``. - smudge : float, optional - Avoid numerical issues by 'smudging' the correctional factor by this - much - the gauging introduced is still exact. - """ - from pyblock3.algebra.core import SubTensor - from pyblock3.algebra.fermion import SparseFermionTensor - ix, = t1.bonds(t2) - t1H = t1.H.reindex_({ix: ix+'*'}) - t2H = t2.H.reindex_({ix: ix+'*'}) - out1 = _contract_connected(t1H, t1) - out2 = _contract_connected(t2H, t2) - sblk1 = [] - sblk2 = [] - for iblk1 in out1.data.to_sparse(): - for iblk2 in out2.data.to_sparse(): - if iblk1.q_labels != iblk2.q_labels: - continue - x = np.diag(np.asarray(iblk1)) - y = np.diag(np.asarray(iblk2)) - s = (x + smudge) / (y + smudge) - sblk1.append(SubTensor(reduced=np.diag(s**-0.25), q_labels=iblk1.q_labels)) - sblk2.append(SubTensor(reduced=np.diag(s**0.25), q_labels=iblk2.q_labels)) - - sign1 = t1.data.pattern[t1.inds.index(ix)] - sign2 = t2.data.pattern[t2.inds.index(ix)] - s1_pattern = {"+":"-+", "-":"+-"}[sign1] - s2_pattern = {"-":"-+", "+":"+-"}[sign2] - s1 = SparseFermionTensor(blocks=sblk1, pattern=s1_pattern).to_flat() - s2 = SparseFermionTensor(blocks=sblk2, pattern=s2_pattern).to_flat() - t1.multiply_index_diagonal_(ix, s1, location="back") - t2.multiply_index_diagonal_(ix, s2, location="front") +tensor_compress_bond = compress_decorator(tensor_compress_bond) +tensor_canonize_bond = compress_decorator(tensor_canonize_bond) class FermionSpace: - """A labelled, ordered dictionary. The tensor labels point to the tensor - and its position inside the fermion space. - - Parameters - ---------- - tensor_order : dictionary, optional - tensor_order[tid] = (tensor, site) - virtual: bool, optional - whether the FermionSpace should be a *view* onto the tensors it is - given, or a copy of them. - - Attributes - ---------- - tensor_map : dict - Mapping of unique ids to tensors and its location, like``{tensor_id: (tensor, site) ...}``. I.e. this is where the tensors are 'stored' by the FermionSpace. 
- """ - def __init__(self, tensor_order=None, virtual=True): self.tensor_order = {} if tensor_order is not None: @@ -565,17 +72,6 @@ def sites(self): else: return [val[1] for val in self.tensor_order.values()] - def is_continuous(self): - """ Check whether the tensors are continously placed in the Fermion Space - """ - sites = self.sites - if len(sites) == 0: - return True - else: - if np.unique(sites).size != len(sites): - raise ValueError("at least one site is occupied multiple times") - return len(sites) == (max(sites)-min(sites)+1) - def copy(self): """ Copy the FermionSpace object. Tensor ids and positions will be preserved and tensors will be copied @@ -604,13 +100,13 @@ def add_tensor(self, tsr, tid=None, site=None, virtual=False): tid = rand_uuid(base="_T") if site is None: site = 0 if len(self.sites)==0 else max(self.sites) + 1 - if site not in self.sites: - T = tsr if virtual else tsr.copy() - self.tensor_order[tid] = (T, site) - T.set_fermion_owner(self, tid) - else: + elif site in self.sites: raise ValueError("site:%s occupied, use replace/insert_tensor method"%site) + T = tsr if virtual else tsr.copy() + self.tensor_order[tid] = (T, site) + T.set_fermion_owner(self, tid) + def replace_tensor(self, site, tsr, tid=None, virtual=False): """ Replace the tensor at a given site, eg 0123456789 0123456789 @@ -627,11 +123,11 @@ def replace_tensor(self, site, tsr, tid=None, virtual=False): virtual: bool, optional whether to replace the tensor inplace """ - atid, _, atsr = self[site] + atid = self.get_tid_from_site(site) + atsr = self.tensor_order[atid][0] T = tsr if virtual else tsr.copy() if tid is None or (tid in self.tensor_order.keys() and tid != atid): tid = atid - T.set_fermion_owner(self, tid) atsr.remove_fermion_owner() del self.tensor_order[atid] @@ -666,7 +162,6 @@ def insert_tensor(self, site, tsr, tid=None, virtual=False): self.tensor_order.update({atid: (atsr, asite+1)}) self.tensor_order.update({tid: (T, site)}) - def insert(self, site, *tsrs, virtual=False): """ insert a group of tensors at a given site, all tensors afterwards will be shifted forward accordingly, eg, @@ -686,7 +181,7 @@ def insert(self, site, *tsrs, virtual=False): self.insert_tensor(site, T, virtual=virtual) site += 1 - def get_tid(self, site): + def get_tid_from_site(self, site): """ Return the tensor id at given site Parameters @@ -699,6 +194,14 @@ def get_tid(self, site): idx = self.sites.index(site) return list(self.tensor_order.keys())[idx] + def get_full_info(self, tid_or_site): + if isinstance(tid_or_site, str): + tid = tid_or_site + else: + tid = self.get_tid_from_site(self, tid_or_site) + T, site = self.tensor_order[tid_or_site] + return T, tid, site + def _reorder_from_dict(self, tid_map): """ Reorder tensors from a tensor_id/position mapping. 
Pizorn algorithm will be applied during moving @@ -717,35 +220,11 @@ def _reorder_from_dict(self, tid_map): ind = des_sites.index(isite) self.move(tid_lst[ind], isite) - def is_adjacent(self, tid1, tid2): - """ Check whether two tensors are adjacently placed in the space - """ - site1 = self.tensor_order[tid1][1] - site2 = self.tensor_order[tid2][1] - return abs(site1-site2) == 1 - - def __getitem__(self, tid_or_site): - """Return a tuple of (tensor id, position, tensor) from the tag (tensor id or position) - """ - if isinstance(tid_or_site, str): - if tid_or_site not in self.tensor_order.keys(): - raise KeyError("tid:%s not found"%tid_or_site) - tsr, site = self.tensor_order[tid_or_site] - return tid_or_site, site, tsr - elif isinstance(tid_or_site, int): - if tid_or_site not in self.sites: - raise KeyError("site:%s not occupied"%tid_or_site) - tid = self.get_tid(tid_or_site) - tsr = self.tensor_order[tid][0] - return tid, tid_or_site, tsr - else: - raise ValueError("not a valid key value(tid or site)") - def __setitem__(self, site, tsr): if site in self.sites: - self.replace_tensor(site, tsr) + self.replace_tensor(site, tsr, virtual=True) else: - self.add_tensor(site, tsr) + self.add_tensor(site, tsr, virtual=True) def move(self, tid_or_site, des_site): """ Move a tensor inside this FermionSpace to the specified position with Pizorn algorithm. @@ -759,12 +238,12 @@ def move(self, tid_or_site, des_site): the position to move the tensor to """ - tid, site, tsr = self[tid_or_site] + tsr, tid, site = self.get_full_info(tid_or_site) if site == des_site: return move_left = (des_site < site) iterator = range(des_site, site) if move_left else range(site+1, des_site+1) shared_inds = [] - tid_lst = [self[isite][0] for isite in iterator] + tid_lst = [self.get_tid_from_site(isite) for isite in iterator] parity = 0 for itid in tid_lst: itsr, isite = self.tensor_order[itid] @@ -775,9 +254,16 @@ def move(self, tid_or_site, des_site): else: self.tensor_order[itid] = (itsr, isite-1) global_parity = (parity % 2) * tsr.data.parity - if global_parity != 0: tsr.data._global_flip() axes = [tsr.inds.index(i) for i in shared_inds] - if len(axes)>0: tsr.data._local_flip(axes) + if global_parity == 0 and len(axes) ==0: + new_data = tsr.data + else: + new_data = tsr.data.copy() + if global_parity !=0: + new_data._global_flip() + if len(axes)>0: + new_data._local_flip(axes) + tsr.modify(data=new_data) self.tensor_order[tid] = (tsr, des_site) def move_past(self, tsr, site_range=None): @@ -796,9 +282,8 @@ def move_past(self, tsr, site_range=None): sites = self.sites site_range = (min(sites), max(sites)+1) start, end = site_range - iterator = range(start, end) shared_inds = [] - tid_lst = [self[isite][0] for isite in iterator] + tid_lst = [self.get_tid_from_site(isite) for isite in range(start, end)] parity = 0 for itid in tid_lst: itsr, isite = self.tensor_order[itid] @@ -813,7 +298,9 @@ def move_past(self, tsr, site_range=None): def make_adjacent(self, tid1, tid2, direction='left'): """ Move one tensor in the specified direction to make the two adjacent """ - if not self.is_adjacent(tid1, tid2): + site1 = self.tensor_order[tid1][1] + site2 = self.tensor_order[tid2][1] + if abs(site1-site2)!=1: site1 = self.tensor_order[tid1][1] site2 = self.tensor_order[tid2][1] if site1 == site2: return @@ -825,61 +312,18 @@ def make_adjacent(self, tid1, tid2, direction='left'): else: raise ValueError("direction %s not recognized"%direction) - def _contract_pairs(self, tid_or_site1, tid_or_site2, out_inds=None, direction='left', 
inplace=True): - """ Contract two tensors in the FermionSpace - - Parameters - ---------- - tid_or_site1 : string or int - Tensor id or position for the 1st tensor - tid_or_site2 : string or int - Tensor id or position for the 2nd tensor - out_inds: list of string, optional - The order for the desired output indices - direction: string - The direction to move tensors if the two are not adjacent - inplace: bool - Whether to contract/move tensors inplace or in a copied FermionSpace - - Returns - ------- - scalar or a FermionTensor - """ - fs = self if inplace else self.copy() - out = _contract_pairs(fs, tid_or_site1, tid_or_site2, out_inds, direction) - - if isinstance(out, (float, complex)): - return out - - site1 = fs[tid_or_site1][1] - site2 = fs[tid_or_site2][1] - - if direction=="left": - site1 = min(site1, site2) - else: - site1 = max(site1, site2) - 1 - site2 = site1 + 1 - # the output fermion tensor will replace the two input tensors in the space - fs.replace_tensor(site1, out, virtual=True) - fs.remove_tensor(site2) - - return out - - def remove_tensor(self, tid_or_site): + def remove_tensor(self, site): """ remove a specified tensor at a given site, eg 012345 01234 ABCDEF, (3, True) -> ABCEF """ - tid, site, tsr = self[tid_or_site] + tid = self.get_tid_from_site(site) + tsr = self.tensor_order[tid][0] tsr.remove_fermion_owner() del self.tensor_order[tid] - indent_sites = [] - for isite in self.sites: - if isite > site: - indent_sites.append(isite) - indent_sites = sorted(indent_sites) - tid_lst = [self.get_tid(isite) for isite in indent_sites] + indent_sites = sorted([isite for isite in self.sites if isite>site]) + tid_lst = [self.get_tid_from_site(isite) for isite in indent_sites] for tid in tid_lst: tsr, site = self.tensor_order[tid] self.tensor_order[tid] = (tsr, site-1) @@ -892,244 +336,202 @@ def H(self): new_fs = FermionSpace() for tid, (tsr, site) in self.tensor_order.items(): T = tsr.copy() - new_data = T.data.dagger - new_inds = T.inds[::-1] - T.modify(data=new_data, inds=new_inds) + T.modify(data=T.data.dagger, inds=T.inds[::-1]) new_fs.add_tensor(T, tid, max_site-site, virtual=True) return new_fs + def _contract_pairs(self, tid1, tid2, output_inds=None): + self.make_adjacent(tid1, tid2) + T1, site1 = self.tensor_order[tid1] + T2, site2 = self.tensor_order[tid2] + out = _contract_connected(T1, T2, output_inds) + self.replace_tensor(min(site1, site2), out, virtual=True) + self.remove_tensor(max(site1, site2)) -class FermionTensor(Tensor): - """A labelled, tagged ndarray. The index labels are used instead of - axis numbers to identify dimensions, and are preserved through operations. - - Parameters - ---------- - data : pyblock3.algebra.fermion.FlatFermionTensor - The n-dimensional data. - inds : sequence of str - The index labels for each dimension. Must match the number of - dimensions of ``data``. - tags : sequence of str, optional - Tags with which to identify and group this tensor. These will - be converted into a ``oset``. - left_inds : sequence of str, optional - Which, if any, indices to group as 'left' indices of an effective - matrix. This can be useful, for example, when automatically applying - unitary constraints to impose a certain flow on a tensor network but at - the atomistic (Tensor) level. 
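# Illustrative sketch: the sign bookkeeping behind the fermionic ``move`` above.
# Moving a tensor of parity p past a block of tensors with total parity q
# multiplies its data by (-1)**(p*q) (the ``_global_flip``), and every index it
# shares with the tensors it passes additionally receives a local sign flip
# (``_local_flip``).  The helper below tracks only the global factor; it is a
# sketch of the rule, not of the pyblock3 implementation.
def global_move_sign(moving_parity, passed_parities):
    q = sum(passed_parities) % 2            # net parity of everything being passed
    return -1 if (q and moving_parity % 2) else +1

print(global_move_sign(1, [1, 0, 1]))       # passes net even parity -> +1
print(global_move_sign(1, [1, 0]))          # passes net odd parity  -> -1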
- """ - def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): - - # a new or copied Tensor always has no owners - self._owners = dict() - self._fermion_owner = None - # Short circuit for copying Tensors - if isinstance(data, self.__class__): - self._data = data.data.copy() - self._inds = data.inds - self._tags = data.tags.copy() - self._left_inds = data.left_inds - return - - self._data = data # asarray(data) - self._inds = tuple(inds) - self._tags = tags_to_oset(tags) - self._left_inds = tuple(left_inds) if left_inds is not None else None - - nd = ndim(self._data) - if nd != len(self.inds): - raise ValueError( - f"Wrong number of inds, {self.inds}, supplied for array" - f" of shape {self._data.shape}.") - - if self.left_inds and any(i not in self.inds for i in self.left_inds): - raise ValueError(f"The 'left' indices {self.left_inds} are not " - f"found in {self.inds}.") - - @property - def symmetry(self): - return self.data.symmetry +# --------------------------------------------------------------------------- # +# Tensor Funcs # +# --------------------------------------------------------------------------- # - @property - def fermion_owner(self): - return self._fermion_owner +def tensor_contract(*tensors, output_inds=None, inplace=False, **contract_opts): + path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) + fs, tid_lst = _fetch_fermion_space(*tensors, inplace=inplace) + if inplace: + tensors = list(tensors) + else: + tensors = [fs.tensor_order[tid][0] for tid in tid_lst] - @property - def parity(self): - return self.data.parity + for conc in path_info.contraction_list: + pos1, pos2 = sorted(conc[0]) + T2 = tensors.pop(pos2) + T1 = tensors.pop(pos1) + out = _core_contract(T1, T2) + tensors.append(out) - def norm(self): - """Frobenius norm of this tensor. - """ - return np.linalg.norm(self.data.data, 2) - - def ind_size(self, dim_or_ind): - if isinstance(dim_or_ind, str): - if dim_or_ind not in self.inds: - raise ValueError("%s indice not found in the tensor"%dim_or_ind) - dim_or_ind = self.inds.index(dim_or_ind) - ipattern = self.data.pattern[dim_or_ind] - if ipattern=="+": - sz = [self.symmetry.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] + if not isinstance(out, (float, complex)): + _output_inds = out.inds + if output_inds is None: + output_inds = _output_inds else: - sz = [-self.symmetry.from_flat(ix) for ix in self.data.q_labels[:,dim_or_ind]] - sp = self.data.shapes[:,dim_or_ind] - bond_dict = dict(zip(sz, sp)) - return BondInfo(bond_dict) + output_inds = tuple(output_inds) + if output_inds!=_output_inds: + out.transpose_(*output_inds) + return out - def copy(self, deep=False): - """Copy this tensor. Note by default (``deep=False``), the underlying - array will *not* be copied. 
The fermion owner will to reset to None - """ - if deep: - t = copy.deepcopy(self) - t.remove_fermion_owner() - else: - t = self.__class__(self, None) - return t +def tensor_split( + T, + left_inds, + method='svd', + get=None, + absorb='both', + max_bond=None, + cutoff=1e-10, + cutoff_mode='rel', + renorm=None, + ltags=None, + rtags=None, + stags=None, + bond_ind=None, + right_inds=None, + qpn_info = None, +): + if get is not None: + return _tensor_split(T, left_inds, method=method, get=get, absorb=absorb, max_bond=max_bond, + cutoff=cutoff, cutoff_mode=cutoff_mode, renorm=renorm, ltags=ltags, rtags=rtags, + stags=stags, bond_ind=bond_ind, right_inds=right_inds, qpn_info=qpn_info) + else: + tensors = _tensor_split(T, left_inds, method=method, get="tensors", absorb=absorb, max_bond=max_bond, + cutoff=cutoff, cutoff_mode=cutoff_mode, renorm=renorm, ltags=ltags, rtags=rtags, + stags=stags, bond_ind=bond_ind, right_inds=right_inds, qpn_info=qpn_info) + return FermionTensorNetwork(tensors[::-1], check_collisions=False) - def multiply_index_diagonal(self, ind, x, inplace=False, location="front"): - """Multiply this tensor by 1D array ``x`` as if it were a diagonal - tensor being contracted into index ``ind``. - """ - if location not in ["front", "back"]: - raise ValueError("invalid for the location of the diagonal") - t = self if inplace else self.copy(full=True) - ax = t.inds.index(ind) - if isinstance(x, FermionTensor): - x = x.data - if location=="front": - out = np.tensordot(x, t.data, axes=((1,), (ax,))) - transpose_order = list(range(1, ax+1)) + [0] + list(range(ax+1, t.ndim)) +def is_mergeable(*ts_or_tsn): + """Check if all FermionTensor or FermionTensorNetwork objects + are part of the same FermionSpace + """ + if len(ts_or_tsn)==1 and isinstance(ts_or_tsn, (FermionTensor, FermionTensorNetwork)): + return True + fs_lst = [] + site_lst = [] + for obj in ts_or_tsn: + if isinstance(obj, FermionTensor): + if obj.fermion_owner is None: + return False + fsobj, tid = obj.fermion_owner + fs_lst.append(hash(fsobj)) + site_lst.append(fsobj.tensor_order[tid][1]) + elif isinstance(obj, FermionTensorNetwork): + fs_lst.append(hash(obj.fermion_space)) + site_lst.extend(obj.filled_sites) else: - out = np.tensordot(t.data, x, axes=((ax,),(0,))) - transpose_order = list(range(ax)) + [t.ndim-1] + list(range(ax, t.ndim-1)) - data = np.transpose(out, transpose_order) - t.modify(data=data) - return t + raise TypeError("unable to find fermionspace") - multiply_index_diagonal_ = functools.partialmethod( - multiply_index_diagonal, inplace=True) + return all([fs==fs_lst[0] for fs in fs_lst]) and len(set(site_lst)) == len(site_lst) - def get_fermion_info(self): - if self.fermion_owner is None: - return None - fs, tid = self.fermion_owner[1:] - return (tid, fs().tensor_order[tid][1]) +def _fetch_fermion_space(*tensors, inplace=True): + """ Retrieve the FermionSpace and the associated tensor_ids for the tensors. + If the given tensors all belong to the same FermionSpace object (fsobj), + the underlying fsobj will be returned. Otherwise, a new FermionSpace will be created, + and the tensors will be placed in the same order as the input tensors. - def contract(self, *others, output_inds=None, **opts): - return tensor_contract(self, *others, output_inds=output_inds, **opts) + Parameters + ---------- + tensors : a tuple or list of FermionTensors + input_tensors + inplace: bool + if not true, a new FermionSpace will be created with all tensors copied. 
+ so subsequent operations on the fsobj will not alter the input tensors. - @fermion_owner.setter - def fermion_owner(self, fowner): - self._fermion_owner = fowner + Returns + ------- + fs : a FermionSpace object + tid_lst: a list of strings for the tensor_ids + """ - def set_fermion_owner(self, fs, tid): - self.fermion_owner = (hash(fs), weakref.ref(fs), tid) + if is_mergeable(*tensors): + if isinstance(tensors[0], FermionTensor): + fs = tensors[0].fermion_owner[0] + else: + fs = tensors[0].fermion_space + if not inplace: + fs = fs.copy() + tid_lst = [] + for tsr_or_tn in tensors: + if isinstance(tsr_or_tn, FermionTensor): + tid_lst.append(tsr_or_tn.get_fermion_info()[0]) + else: + tid_lst.append(tsr_or_tn.tensor_map.keys()) + else: + fs = FermionSpace() + for tsr_or_tn in tensors[::-1]: + if isinstance(tsr_or_tn, FermionTensor): + fs.add_tensor(tsr_or_tn, virtual=inplace) + elif isinstance(tsr_or_tn, FermionTensorNetwork): + if not tsr_or_tn.is_continuous(): + raise ValueError("Input Network not continous, merge not allowed") + for itsr in tsr_or_tn: + fs.add_tensor(itsr, virtual=inplace) + tid_lst = list(fs.tensor_order.keys()) + return fs, tid_lst - def remove_fermion_owner(self): - self.fermion_owner = None +# --------------------------------------------------------------------------- # +# Tensor Class # +# --------------------------------------------------------------------------- # - def isel(self, selectors, inplace=False): - raise NotImplementedError +class FermionTensor(BlockTensor): - def expand_ind(self, ind, size): - raise NotImplementedError + __slots__ = ('_data', '_inds', '_tags', '_left_inds', '_owners', '_fermion_owner') - def new_ind(self, name, size=1, axis=0): - raise NotImplementedError + def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): - @property - def shapes(self): - return self._data.shapes + # a new or copied Tensor always has no owners + self._fermion_owner = None + BlockTensor.__init__(self, data=data, inds=inds, tags=tags, left_inds=left_inds) @property - def shape(self): - """Return the "inflated" shape composed of maximal size for each leg - """ - shapes = self.shapes - return tuple(np.amax(shapes, axis=0)) - - @functools.wraps(tensor_split) - def split(self, *args, **kwargs): - return tensor_split(self, *args, **kwargs) - - def transpose(self, *output_inds, inplace=False): - """Transpose this tensor. This does not change the physical meaning of - the operator represented, eg: - T_{abc}a^{\dagger}b^{\dagger}c^{\dagger} = \tilda{T}_{cab}c^{\dagger}a^{\dagger}b^{\dagger} + def symmetry(self): + return self.data.dq.__name__ - Parameters - ---------- - output_inds : sequence of str - The desired output sequence of indices. - inplace : bool, optional - Perform the tranposition inplace. + @property + def fermion_owner(self): + return self._fermion_owner - Returns - ------- - tt : Tensor - The transposed tensor. + @property + def parity(self): + return self.data.parity - See Also - -------- - transpose_like + def copy(self, deep=False): + """Copy this tensor. Note by default (``deep=False``), the underlying + array will *not* be copied. The fermion owner will to reset to None """ - t = self if inplace else self.copy() - - output_inds = tuple(output_inds) # need to re-use this. 
- - if set(t.inds) != set(output_inds): - raise ValueError("'output_inds' must be permutation of the current" - f" tensor indices, but {set(t.inds)} != " - f"{set(output_inds)}") - - current_ind_map = {ind: i for i, ind in enumerate(t.inds)} - out_shape = tuple(current_ind_map[i] for i in output_inds) - t.modify(apply=lambda x: np.transpose(x, out_shape), inds=output_inds) + if deep: + t = copy.deepcopy(self) + t.remove_fermion_owner() + else: + t = self.__class__(self, None) return t - transpose_ = functools.partialmethod(transpose, inplace=True) - - @property - def H(self): - """Return the ket of this tensor, eg: - U_{abc} a^{\dagger}b^{\dagger}c^{\dagger} -> U^{cba\star}cba - Note this is different from Fermionic transposition - """ - data = self.data.dagger - inds = self.inds[::-1] - tsr = self.copy() - tsr.modify(data=data, inds=inds) - - return tsr - - def fuse(self, fuse_map, inplace=False): - raise NotImplementedError - - def unfuse(self, unfuse_map, shape_map, inplace=False): - raise NotImplementedError - - def squeeze(self, inplace=False): - raise NotImplementedError + def get_fermion_info(self): + if self.fermion_owner is None: + return None + fs, tid = self.fermion_owner + site = fs.tensor_order[tid][1] + return (tid, site) - def norm(self): - """Frobenius norm of this tensor. - """ - return np.linalg.norm(self.data.data, 2) + def contract(self, *others, output_inds=None, **opts): + return tensor_contract(self, *others, output_inds=output_inds, **opts) - def symmetrize(self, ind1, ind2, inplace=False): - raise NotImplementedError + @fermion_owner.setter + def fermion_owner(self, fowner): + self._fermion_owner = fowner - def unitize(self, left_inds=None, inplace=False, method='qr'): - raise NotImplementedError + def set_fermion_owner(self, fs, tid): + self.fermion_owner = (fs, tid) - def randomize(self, dtype=None, inplace=False, **randn_opts): - raise NotImplementedError + def remove_fermion_owner(self): + self.fermion_owner = None - def flip(self, ind, inplace=False): - raise NotImplementedError + def split(self, *args, **kwargs): + return tensor_split(self, *args, **kwargs) def __and__(self, other): """Combine with another ``Tensor`` or ``TensorNetwork`` into a new @@ -1143,79 +545,104 @@ def __or__(self, other): """ return FermionTensorNetwork((self, other), virtual=True) - def graph(self, *args, **kwargs): + def draw(self, *args, **kwargs): """Plot a graph of this tensor and its indices. 
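# Illustrative sketch, plain NumPy: the index-name based transposition above.
# The permutation handed to ``np.transpose`` is built by looking up, for each
# desired output index, which axis it currently occupies.  For fermionic data
# the additional operator-reordering signs are handled inside pyblock3 and are
# not reproduced here.
import numpy as np

def transpose_by_name(data, inds, output_inds):
    current = {ind: ax for ax, ind in enumerate(inds)}
    perm = tuple(current[ind] for ind in output_inds)
    return np.transpose(data, perm)

x = np.random.randn(2, 3, 4)                      # axes named a, b, c
y = transpose_by_name(x, ('a', 'b', 'c'), ('c', 'a', 'b'))
print(y.shape, np.allclose(y, np.einsum('abc->cab', x)))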
""" - FermionTensorNetwork((self,)).graph(*args, **kwargs) - + draw_tn(FermionTensorNetwork((self,)), *args, **kwargs) -def is_mergeable(*ts_or_tsn): - """Check if all objects(FermionTensor or FermionTensorNetwork) - are part of the same FermionSpace - """ - if isinstance(ts_or_tsn, (FermionTensor, FermionTensorNetwork)): - return True - fs_lst = [] - site_lst = [] - for obj in ts_or_tsn: - if isinstance(obj, FermionTensor): - if obj.fermion_owner is None: - return False - hashval, fsobj, tid = obj.fermion_owner - if fsobj() is None: - return False - fs_lst.append(hashval) - site_lst.append(fsobj()[tid][1]) - elif isinstance(obj, FermionTensorNetwork): - fs_lst.append(hash(obj.fermion_space)) - site_lst.extend(obj.filled_sites) - else: - raise TypeError("unable to find fermionspace") + graph = draw - return all([fs==fs_lst[0] for fs in fs_lst]) and len(set(site_lst)) == len(site_lst) +# --------------------------------------------------------------------------- # +# Tensor Network Class # +# --------------------------------------------------------------------------- # -class FermionTensorNetwork(TensorNetwork): +class FermionTensorNetwork(BlockTensorNetwork): + __slots__ = ('_inner_inds', '_outer_inds', '_tid_counter') + _EXTRA_PROPS = () + _CONTRACT_STRUCTURED = False - def __init__(self, ts, *, virtual=False, check_collisions=True): + def __init__(self, ts, *, virtual=False, check_collisions=True): - if is_mergeable(*ts) and virtual: + # short-circuit for copying TensorNetworks + if isinstance(ts, self.__class__): + if not ts.is_continuous(): + raise TypeError("Tensors not continuously placed in the network, \ + this maybe due to this network being part of another network") + fs = FermionSpace() + self.tag_map = valmap(lambda tids: tids.copy(), ts.tag_map) + self.ind_map = valmap(lambda tids: tids.copy(), ts.ind_map) self.tensor_map = dict() - self.tag_map = dict() - self.ind_map = dict() - self.fermion_space = _fetch_fermion_space(*ts)[0] - self.assemble(ts) + for t in ts: + tid = t.get_fermion_info()[0] + t = t.copy() + self.tensor_map[tid] = t + self.tensor_map[tid].add_owner(self, tid) + fs.add_tensor(t, tid=tid, virtual=True) + self._inner_inds = ts._inner_inds.copy() + self._outer_inds = ts._outer_inds.copy() + self._tid_counter = ts._tid_counter + self.exponent = ts.exponent + for ep in ts.__class__._EXTRA_PROPS: + setattr(self, ep, getattr(ts, ep)) + return else: - if isinstance(ts, FermionTensorNetwork): - self.tag_map = valmap(lambda tids: tids.copy(), ts.tag_map) - self.ind_map = valmap(lambda tids: tids.copy(), ts.ind_map) - self.fermion_space = ts.fermion_space if virtual else ts.fermion_space.copy() - self.tensor_map = dict() - for tid, t in ts.tensor_map.items(): - self.tensor_map[tid] = self.fermion_space[tid][2] - self.tensor_map[tid].add_owner(self, tid) - for ep in ts.__class__._EXTRA_PROPS: - setattr(self, ep, getattr(ts, ep)) - self.exponent = ts.exponent - return + BlockTensorNetwork.__init__(self, ts, virtual=virtual, check_collisions=True) - # internal structure - self.fermion_space = FermionSpace() - self.tensor_map = dict() - self.tag_map = dict() - self.ind_map = dict() - self._inner_inds = oset() - for t in ts: - self.add(t, virtual=virtual, check_collisions=check_collisions) - self._inner_inds = None - self.exponent = 0.0 + @property + def fermion_space(self): + if len(self.tensor_map)==0: + return FermionSpace() + else: + return list(self.tensor_map.values())[0].fermion_owner[0] + + @property + def filled_sites(self): + return 
[self.fermion_space.tensor_order[tid][1] for tid in self.tensor_map.keys()] + + @property + def H(self): + tn = self.copy(full=True) + fs = tn.fermion_space + max_site = max(fs.sites) + for tid, (T, site) in fs.tensor_order.items(): + T.modify(data=T.data.dagger, inds=T.inds[::-1]) + fs.tensor_order.update({tid: (T, max_site-site)}) + return tn + + def is_continuous(self): + """ + Check if sites in the current tensor network are contiguously occupied + """ + filled_sites = self.filled_sites + if len(filled_sites) ==0 : return True + return (max(filled_sites) - min(filled_sites) + 1) == len(filled_sites) + + def copy(self, full=False): + """ For full copy, the tensors and underlying FermionSpace(all tensors in it) will + be copied. For partial copy, the tensors in this network must be continuously + placed and a new FermionSpace will be created to hold this continous sector. + """ + if full: + fs = self.fermion_space.copy() + tids = list(self.tensor_map.keys()) + tsr = [fs.tensor_order[tid][0] for tid in tids] + newtn = FermionTensorNetwork(tsr, virtual=True) + else: + if not self.is_continuous(): + raise TypeError("Tensors not continuously placed in the network, \ + partial copy not allowed") + newtn = FermionTensorNetwork(self) + newtn.view_like_(self) + return newtn def __and__(self, other): """Combine this tensor network with more tensors, without contracting. Copies the tensors. """ - virtual = is_mergeable(self, other) - return FermionTensorNetwork((self, other), virtual=virtual) + if is_mergeable(self, other): + raise ValueError("the two networks are in the same fermionspace, use self |= other") + return FermionTensorNetwork((self, other), virtual=False) def __or__(self, other): """Combine this tensor network with more tensors, without contracting. @@ -1226,84 +653,46 @@ def __or__(self, other): def __iter__(self): sorted_sites = sorted(self.filled_sites) for isite in sorted_sites: - yield self.fermion_space[isite][2] + tid = self.fermion_space.get_tid_from_site(isite) + yield self.tensor_map[tid] - def _reorder_from_tid(self, tid_map, inplace=False): - tn = self if inplace else self.copy(full=True) - tn.fermion_space._reorder_from_dict(tid_map) - return tn + def __setitem__(self, tags, tensor): + """Set the single tensor uniquely associated with ``tags``. + """ + tids = self._get_tids_from_tags(tags, which='all') + if len(tids) != 1: + raise KeyError("'TensorNetwork.__setitem__' is meant for a single " + "existing tensor only - found {} with tag(s) '{}'." + .format(len(tids), tags)) - def balance_bonds(self, inplace=False): - """Apply :func:`~quimb.tensor.fermion.tensor_balance_bond` to - all bonds in this tensor network. + if not isinstance(tensor, FermionTensor): + raise TypeError("Can only set value with a new 'FermionTensor'.") - Parameters - ---------- - inplace : bool, optional - Whether to perform the bond balancing inplace or not. 
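# Illustrative sketch, plain NumPy: the gauge applied by the bond balancing
# above.  For each shared bond value, compare the squared 'column' norms x and
# y on the two sides, form s = (x + smudge) / (y + smudge), and scale the two
# sides by s**(-1/4) and s**(+1/4) so the norms equalize while the contraction
# is left exactly unchanged.  This is a real-valued, symmetry-free caricature
# of the block-wise fermionic version.
import numpy as np

def balance_bond(t1, t2, smudge=1e-6):
    # bond is the last axis of t1 and the first axis of t2
    x = np.einsum('ib,ib->b', t1, t1)             # squared column norms of t1
    y = np.einsum('bj,bj->b', t2, t2)             # squared column norms of t2
    s = (x + smudge) / (y + smudge)
    return t1 * s ** -0.25, s[:, None] ** 0.25 * t2

t1 = np.random.randn(5, 3)
t2 = np.random.randn(3, 5) * 10.0                 # badly unbalanced bond
b1, b2 = balance_bond(t1, t2)
print(np.allclose(b1 @ b2, t1 @ t2))              # the gauge leaves the product intact
print(np.einsum('ib,ib->b', b1, b1) / np.einsum('bj,bj->b', b2, b2))  # ratios ~ 1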
+ tid, = tids + site = self.fermion_space.tensor_order[tid][1] + TensorNetwork._pop_tensor(self, tid) + TensorNetwork.add_tensor(self, tensor, tid=tid, virtual=True) + self.fermion_space.replace_tensor(site, tensor, tid=tid, virtual=True) - Returns - ------- - TensorNetwork - """ + def _reorder_from_tid(self, tid_map, inplace=False): tn = self if inplace else self.copy(full=True) - - for ix, tids in tn.ind_map.items(): - if len(tids) != 2: - continue - tid1, tid2 = tids - t1, t2 = [tn.tensor_map[x] for x in (tid1, tid2)] - tensor_balance_bond(t1, t2) - + tn.fermion_space._reorder_from_dict(tid_map) return tn - balance_bonds_ = functools.partialmethod(balance_bonds, inplace=True) - - def assemble_with_tensor(self, tsr): - if not is_mergeable(self, tsr): - raise ValueError("tensor not same in the fermion space of the tensor network") - tid = tsr.fermion_owner[2] - TensorNetwork.add_tensor(self, tsr, tid, virtual=True) - - def assemble_with_tensor_network(self, tsn): - if not is_mergeable(self, tsn): - raise ValueError("the two tensor networks not in the fermion space") - TensorNetwork.add_tensor_network(self, tsn, virtual=True) - - def assemble(self, t): - if isinstance(t, (tuple, list)): - for each_t in t: - self.assemble(each_t) - return - - istensor = isinstance(t, FermionTensor) - istensornetwork = isinstance(t, FermionTensorNetwork) - - if not (istensor or istensornetwork): - raise TypeError("TensorNetwork should be called as " - "`TensorNetwork(ts, ...)`, where each " - "object in 'ts' is a Tensor or " - "TensorNetwork.") - if istensor: - self.assemble_with_tensor(t) - else: - self.assemble_with_tensor_network(t) - def add_tensor(self, tsr, tid=None, virtual=False, site=None): if tid is None or tid in self.fermion_space.tensor_order.keys(): tid = rand_uuid(base="_T") + if virtual: - fs = tsr.fermion_owner + fs = tsr.fermion_owner if fs is not None: - if fs[0] != hash(self.fermion_space): - raise ValueError("the tensor is already is in a different Fermion Space, \ - inplace addition not allowed") + if hash(fs[0]) != hash(self.fermion_space) and len(self.tensor_map)!=0: + raise ValueError("the tensor is already in a different FermionSpace, inplace addition not allowed") else: - if fs[2] in self.tensor_map.keys(): - raise ValueError("the tensor is already in this TensorNetwork, \ - inplace addition not allowed") - else: - self.assemble_with_tensor(tsr) + tid, isite = tsr.get_fermion_info() + if site is not None and site != isite: + raise ValueError("the specified site not consistent with the original location of this tensor in the FermionSpace") + TensorNetwork.add_tensor(self, tsr, tid, virtual=True) else: self.fermion_space.add_tensor(tsr, tid, site, virtual=True) TensorNetwork.add_tensor(self, tsr, tid, virtual=True) @@ -1314,9 +703,12 @@ def add_tensor(self, tsr, tid=None, virtual=False, site=None): def add_tensor_network(self, tn, virtual=False, check_collisions=True): if virtual: - if hash(tn.fermion_space) == hash(self.fermion_space): + if min(len(self.tensor_map), len(tn.tensor_map)) == 0: + TensorNetwork.add_tensor_network(self, tn, virtual=virtual, check_collisions=check_collisions) + return + elif hash(tn.fermion_space) == hash(self.fermion_space): if is_mergeable(self, tn): - TensorNetwork.add_tensor_network(tn, virtual=virtual, check_collisions=check_collisions) + TensorNetwork.add_tensor_network(self, tn, virtual=virtual, check_collisions=check_collisions) else: raise ValueError("the two tensornetworks co-share same sites, inplace addition not allow") return @@ -1359,106 
+751,22 @@ def add_tensor_network(self, tn, virtual=False, check_collisions=True): for tid, tsr in sorted_tensors: self.add_tensor(tsr, tid=tid, virtual=virtual) - def add(self, t, virtual=False, check_collisions=True): - """Add FermionTensor, FermionTensorNetwork or sequence thereof to self. - """ - if isinstance(t, (tuple, list)): - for each_t in t: - self.add(each_t, virtual=virtual, - check_collisions=check_collisions) - return - - istensor = isinstance(t, FermionTensor) - istensornetwork = isinstance(t, FermionTensorNetwork) - - if not (istensor or istensornetwork): - raise TypeError("TensorNetwork should be called as " - "`TensorNetwork(ts, ...)`, where each " - "object in 'ts' is a Tensor or " - "TensorNetwork.") - - if istensor: - self.add_tensor(t, virtual=virtual) - else: - self.add_tensor_network(t, virtual=virtual, - check_collisions=check_collisions) - def select(self, tags, which='all'): - tagged_tids = self._get_tids_from_tags(tags, which=which) ts = [self.tensor_map[n] for n in tagged_tids] tn = FermionTensorNetwork(ts, check_collisions=False, virtual=True) tn.view_like_(self) return tn - def __iand__(self, tensor): - """Inplace, but non-virtual, addition of a Tensor or TensorNetwork to - this network. It should not have any conflicting indices. - """ - if is_mergeable(self, tensor): - self.assemble(tensor) - else: - self.add(tensor, virtual=False) - return self - - def __ior__(self, tensor): - """Inplace, virtual, addition of a Tensor or TensorNetwork to this - network. It should not have any conflicting indices. - """ - if is_mergeable(self, tensor): - self.assemble(tensor) - else: - self.add(tensor, virtual=True) - return self - - # ------------------------------- Methods ------------------------------- # - - @property - def filled_sites(self): - return [self.fermion_space[tid][1] for tid in self.tensor_map.keys()] - - def is_complete(self): - ''' - Check if the current tensor network contains all the tensors in the fermion space - ''' - full_tid = self.fermion_space.tensor_order.keys() - tensor_tid = self.tensor_map.keys() - return set(full_tid) == set(tensor_tid) - - def is_continuous(self): - """ - Check if sites in the current tensor network are contiguously occupied - """ - filled_sites = self.filled_sites - if len(filled_sites) ==0 : return True - return (max(filled_sites) - min(filled_sites) + 1) == len(filled_sites) - - def copy(self, full=False): - """ For full copy, the tensors and underlying FermionSpace(all tensors in it) will - be copied. For partial copy, the tensors in this network must be continuously - placed and a new FermionSpace will be created to hold this continous sector. - """ - if full: - return self.__class__(self, virtual=False) - else: - if not self.is_continuous(): - raise TypeError("Tensors not continuously placed in the network, \ - partial copy not allowed") - newtn = FermionTensorNetwork([]) - newtn.add_tensor_network(self) - newtn.view_like_(self) - return newtn - - - def _pop_tensor(self, tid, remove_from_fs=True): + def _pop_tensor(self, tid, remove_from_fs=False): """Remove a tensor from this network, returning said tensor. 
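# Illustrative sketch: the site-contiguity check referenced above reduces to
# "the occupied sites form an unbroken range".  A partial copy of a network is
# only allowed when this holds, since the copied tensors must be able to form
# their own self-contained FermionSpace.
def toy_is_continuous(filled_sites):
    if not filled_sites:
        return True
    return max(filled_sites) - min(filled_sites) + 1 == len(filled_sites)

print(toy_is_continuous([3, 4, 5, 6]))   # True  -> partial copy allowed
print(toy_is_continuous([0, 1, 4]))      # False -> sites 2, 3 sit in another network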
""" # pop the tensor itself t = self.tensor_map.pop(tid) # remove the tid from the tag and ind maps - self._remove_tid(t.tags, self.tag_map, tid) - self._remove_tid(t.inds, self.ind_map, tid) + self._unlink_tags(t.tags, tid) + self._unlink_inds(t.inds, tid) # remove this tensornetwork as an owner t.remove_owner(self) @@ -1468,83 +776,9 @@ def _pop_tensor(self, tid, remove_from_fs=True): return t - - _pop_tensor_ = functools.partialmethod(_pop_tensor, remove_from_fs=False) - - @property - def H(self): - tn = self.copy(full=True) - fs = tn.fermion_space - max_site = max(fs.sites) - - for tid, (tsr, site) in fs.tensor_order.items(): - new_data = tsr.data.dagger - new_inds = tsr.inds[::-1] - tsr.modify(data=new_data, inds=new_inds) - fs.tensor_order.update({tid: (tsr, max_site-site)}) - return tn - - def __mul__(self, other): - raise NotImplementedError - - def __rmul__(self, other): - raise NotImplementedError - - def __imul__(self, other): - raise NotImplementedError - - def __truediv__(self, other): - raise NotImplementedError - - def __itruediv__(self, other): - raise NotImplementedError - - # ----------------- selecting and splitting the network ----------------- # - - def __setitem__(self, tags, tensor): - #TODO: FIXME - """Set the single tensor uniquely associated with ``tags``. - """ - tids = self._get_tids_from_tags(tags, which='all') - if len(tids) != 1: - raise KeyError("'TensorNetwork.__setitem__' is meant for a single " - "existing tensor only - found {} with tag(s) '{}'." - .format(len(tids), tags)) - - if not isinstance(tensor, FermionTensor): - raise TypeError("Can only set value with a new 'FermionTensor'.") - - tid, = tids - site = self.fermion_space.tensor_order[tid][1] - TensorNetwork._pop_tensor(tid) - TensorNetwork.add_tensor(tensor, tid=tid, virtual=True) - self.fermion_space.replace_tensor(site, tensor, tid=tid, virtual=True) + _pop_tensor_ = functools.partialmethod(_pop_tensor, remove_from_fs=True) def partition_tensors(self, tags, inplace=False, which='any'): - """Split this TN into a list of tensors containing any or all of - ``tags`` and a ``FermionTensorNetwork`` of the the rest. - The tensors and FermionTensorNetwork remain in the same FermionSpace - - Parameters - ---------- - tags : sequence of str - The list of tags to filter the tensors by. Use ``...`` - (``Ellipsis``) to filter all. - inplace : bool, optional - If true, remove tagged tensors from self, else create a new network - with the tensors removed. - which : {'all', 'any'} - Whether to require matching all or any of the tags. - - Returns - ------- - (u_tn, t_ts) : (FermionTensorNetwork, tuple of FermionTensors) - The untagged fermion tensor network, and the sequence of tagged Tensors. - - See Also - -------- - partition, select, select_tensors - """ tagged_tids = self._get_tids_from_tags(tags, which=which) # check if all tensors have been tagged @@ -1553,7 +787,7 @@ def partition_tensors(self, tags, inplace=False, which='any'): # Copy untagged to new network, and pop tagged tensors from this untagged_tn = self if inplace else self.copy(full=True) - tagged_ts = tuple(map(untagged_tn._pop_tensor_, sorted(tagged_tids))) + tagged_ts = tuple(map(untagged_tn._pop_tensor, sorted(tagged_tids))) return untagged_tn, tagged_ts @@ -1573,7 +807,7 @@ def partition(self, tags, which='any', inplace=False): Returns ------- - untagged_tn, tagged_tn : (FermionTensorNetwork, FermionTensorNetwork) + untagged_tn, tagged_tn : (TensorNetwork, TensorNetwork) The untagged and tagged tensor networs. 
See Also @@ -1581,99 +815,39 @@ def partition(self, tags, which='any', inplace=False): partition_tensors, select, select_tensors """ tagged_tids = self._get_tids_from_tags(tags, which=which) - kws = {'check_collisions': False} t1 = self if inplace else self.copy(full=True) - t2s = [t1._pop_tensor_(tid) for tid in tagged_tids] + t2s = [t1._pop_tensor(tid) for tid in tagged_tids] t2 = FermionTensorNetwork(t2s, virtual=True, **kws) t2.view_like_(self) return t1, t2 - def replace_with_svd(self, where, left_inds, eps, *, which='any', - right_inds=None, method='isvd', max_bond=None, - absorb='both', cutoff_mode='rel', renorm=None, - ltags=None, rtags=None, keep_tags=True, - start=None, stop=None, inplace=False): - raise NotImplementedError - def contract_between(self, tags1, tags2, **contract_opts): - """Contract the two tensors specified by ``tags1`` and ``tags2`` - respectively. This is an inplace operation. No-op if the tensor - specified by ``tags1`` and ``tags2`` is the same tensor. - - Parameters - ---------- - tags1 : - Tags uniquely identifying the first tensor. - tags2 : str or sequence of str - Tags uniquely identifying the second tensor. - contract_opts - Supplied to :func:`~quimb.tensor.fermion.tensor_contract`. - """ tid1, = self._get_tids_from_tags(tags1, which='all') tid2, = self._get_tids_from_tags(tags2, which='all') - direction = contract_opts.pop("direction", "left") # allow no-op for same tensor specified twice ('already contracted') if tid1 == tid2: return - - self._pop_tensor_(tid1) - self._pop_tensor_(tid2) - - out = self.fermion_space._contract_pairs(tid1, tid2, direction=direction, inplace=True) - if isinstance(out, (float, complex)): - return out + T1 = self._pop_tensor(tid1) + T2 = self._pop_tensor(tid2) + T12 = tensor_contract(T1, T2, inplace=True, **contract_opts) + if isinstance(T12, (float, complex)): + return T12 else: - self |= out + self |= T12 def contract_ind(self, ind, **contract_opts): """Contract tensors connected by ``ind``. """ tids = self._get_tids_from_inds(ind) - if len(tids) <= 1: return - ts = [self._pop_tensor_(tid) for tid in tids] - direction = contract_opts.pop("direction", "left") - out = tensor_contract(*ts, direction=direction, inplace=True) + ts = [self._pop_tensor(tid) for tid in tids] + out = tensor_contract(*ts, inplace=True, **contract_opts) if isinstance(out, (float, complex)): return out else: self |= out - def contract_tags(self, tags, inplace=False, which='any', **opts): - - tids = self._get_tids_from_tags(tags, which='any') - if len(tids) == 0: - raise ValueError("No tags were found - nothing to contract. " - "(Change this to a no-op maybe?)") - elif len(tids) == 1: - return self - - untagged_tn, tagged_ts = self.partition_tensors( - tags, inplace=inplace, which=which) - - - contracted = tensor_contract(*tagged_ts, inplace=True, **opts) - - if untagged_tn is None: - return contracted - - untagged_tn.add_tensor(contracted, virtual=True) - return untagged_tn - - def contract(self, tags=..., inplace=False, **opts): - - if tags is all: - return tensor_contract(*self, inplace=inplace, **opts) - - # this checks whether certain TN classes have a manually specified - # contraction pattern (e.g. 1D along the line) - if self._CONTRACT_STRUCTURED: - raise NotImplementedError() - - # else just contract those tensors specified by tags. 
- return self.contract_tags(tags, inplace=inplace, **opts) - def _compress_between_tids( self, tid1, @@ -1683,14 +857,14 @@ def _compress_between_tids( equalize_norms=False, **compress_opts ): - Tl = self.tensor_map[tid1] Tr = self.tensor_map[tid2] - tensor_compress_bond(Tl, Tr, inplace=True, **compress_opts) if canonize_distance: raise NotImplementedError + tensor_compress_bond(Tl, Tr, **compress_opts) + if equalize_norms: self.strip_exponent(tid1, equalize_norms) self.strip_exponent(tid2, equalize_norms) @@ -1710,24 +884,39 @@ def _canonize_between_tids( self.strip_exponent(tid1, equalize_norms) self.strip_exponent(tid2, equalize_norms) - def replace_section_with_svd(self, start, stop, eps, - **replace_with_svd_opts): - raise NotImplementedError + # ----------------------- contracting the network ----------------------- # + def contract_tags(self, tags, inplace=False, which='any', **opts): + untagged_tn, tagged_ts = self.partition_tensors( + tags, inplace=inplace, which=which) - def convert_to_zero(self): - raise NotImplementedError + if not tagged_ts: + raise ValueError("No tags were found - nothing to contract. " + "(Change this to a no-op maybe?)") - def compress_all(self, inplace=False, **compress_opts): - raise NotImplementedError + contracted = tensor_contract(*tagged_ts, inplace=True, **opts) - def new_bond(self, tags1, tags2, **opts): - raise NotImplementedError + if untagged_tn is None: + return contracted - def cut_bond(self, bnd, left_ind, right_ind): - raise NotImplementedError + untagged_tn.add_tensor(contracted, virtual=True) + return untagged_tn - def cut_between(self, left_tags, right_tags, left_ind, right_ind): - raise NotImplementedError + def contract(self, tags=..., inplace=False, **opts): + if tags is all: + return tensor_contract(*self, inplace=inplace, **opts) + + # this checks whether certain TN classes have a manually specified + # contraction pattern (e.g. 1D along the line) + if self._CONTRACT_STRUCTURED: + if (tags is ...) or isinstance(tags, slice): + return self.contract_structured(tags, inplace=inplace, **opts) - def cut_iter(self, *inds): - raise NotImplementedError + # else just contract those tensors specified by tags. + return self.contract_tags(tags, inplace=inplace, **opts) + + contract_ = functools.partialmethod(contract, inplace=True) + + def __matmul__(self, other): + """Overload "@" to mean full contraction with another network. + """ + return FermionTensorNetwork((self, other)) ^ ... diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index ff94716b..5fefdca6 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -1,34 +1,387 @@ -from .fermion import ( - FermionTensorNetwork, - FermionTensor, - tensor_contract +"""Classes and algorithms related to 2D tensor networks. 
+""" +import re +import functools +from operator import add +from itertools import product, cycle, starmap +from collections import defaultdict + +import opt_einsum as oe + +from ..utils import check_opt, pairwise +from .tensor_core import ( + bonds, + rand_uuid, + oset, + tags_to_oset, + oset_union, ) from .tensor_2d import ( + Rotator2D, TensorNetwork2D, TensorNetwork2DVector, TensorNetwork2DFlat, + TensorNetwork2DOperator, PEPS, + PEPO, is_lone_coo, gen_long_range_path, - plaquette_to_sites, calc_plaquette_sizes, calc_plaquette_map) -from .tensor_core import ( - rand_uuid, - oset, - tags_to_oset, - bonds -) -from .block_tools import inv_with_smudge -from ..utils import check_opt, pairwise -from collections import defaultdict -from itertools import product -import numpy as np -import functools -from operator import add +from .tensor_block import BlockTensorNetwork +from .fermion import FermionTensor, FermionTensorNetwork, tensor_contract INVERSE_CUTOFF = 1e-10 +class FermionTensorNetwork2D(FermionTensorNetwork, TensorNetwork2D): + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + ) + + def _compatible_2d(self, other): + """Check whether ``self`` and ``other`` are compatible 2D tensor + networks such that they can remain a 2D tensor network when combined. + """ + return ( + isinstance(other, FermionTensorNetwork2D) and + all(getattr(self, e) == getattr(other, e) + for e in FermionTensorNetwork2D._EXTRA_PROPS) + ) + + def __and__(self, other): + new = super().__and__(other) + if self._compatible_2d(other): + new.view_as_(FermionTensorNetwork2D, like=self) + return new + + def __or__(self, other): + new = super().__or__(other) + if self._compatible_2d(other): + new.view_as_(FermionTensorNetwork2D, like=self) + return new + + def flatten(self, fuse_multibonds=True, inplace=False): + raise NotImplementedError + + def reorder(self, direction, layer_tags=None, inplace=False): + Lx, Ly = self._Lx, self._Ly + tid_map = dict() + current_position = 0 + if direction == "row": + iterator = product(range(Lx), range(Ly)) + elif direction == "col": + iterator = product(range(Ly), range(Lx)) + else: + raise KeyError("direction not supported") + + for i, j in iterator: + x, y = (i, j) if direction=="row" else (j, i) + site_tag = self.site_tag(x, y) + tids = self._get_tids_from_tags(site_tag) + if len(tids) == 1: + tid, = tids + if tid not in tid_map: + tid_map[tid] = current_position + current_position +=1 + else: + if layer_tags is None: + _tags = [self.tensor_map[ix].tags for ix in tids] + _tmp_tags = _tags[0].copy() + for itag in _tags[1:]: + _tmp_tags &= itag + _layer_tags = sorted([list(i-_tmp_tags)[0] for i in _tags]) + else: + _layer_tags = layer_tags + for tag in _layer_tags: + tid, = self._get_tids_from_tags((site_tag, tag)) + if tid not in tid_map: + tid_map[tid] = current_position + current_position += 1 + + return self._reorder_from_tid(tid_map, inplace) + + def _contract_boundary_full_bond( + self, + xrange, + yrange, + from_which, + max_bond, + cutoff=0.0, + method='eigh', + renorm=True, + optimize='auto-hq', + opposite_envs=None, + contract_boundary_opts=None, + ): + raise NotImplementedError + + def compute_environments( + self, + from_which, + xrange=None, + yrange=None, + max_bond=None, + *, + cutoff=1e-10, + canonize=True, + mode='mps', + layer_tags=None, + dense=False, + compress_opts=None, + envs=None, + **contract_boundary_opts + ): + direction = {"left": "col", + "right": "col", + "top": "row", + "bottom": "row"}[from_which] + tn = 
self.reorder(direction, layer_tags=layer_tags) + + r2d = Rotator2D(tn, xrange, yrange, from_which) + sweep, row_tag = r2d.vertical_sweep, r2d.row_tag + contract_boundary_fn = r2d.get_contract_boundary_fn() + + if envs is None: + envs = {} + + if mode == 'full-bond': + # set shared storage for opposite env contractions + contract_boundary_opts.setdefault('opposite_envs', {}) + + envs[from_which, sweep[0]] = FermionTensorNetwork([]) + first_row = row_tag(sweep[0]) + envs['mid', sweep[0]] = tn.select(first_row).copy() + if dense: + tn ^= first_row + envs[from_which, sweep[1]] = tn.select(first_row).copy() + + for i in sweep[2:]: + iprevprev = i - 2 * sweep.step + iprev = i - sweep.step + envs['mid', iprev] = tn.select(row_tag(iprev)).copy() + if dense: + tn ^= (row_tag(iprevprev), row_tag(iprev)) + else: + contract_boundary_fn( + iprevprev, iprev, + max_bond=max_bond, + cutoff=cutoff, + mode=mode, + canonize=canonize, + layer_tags=layer_tags, + compress_opts=compress_opts, + **contract_boundary_opts, + ) + + envs[from_which, i] = tn.select(first_row).copy() + + return envs + + compute_bottom_environments = functools.partialmethod( + compute_environments, from_which='bottom') + + compute_top_environments = functools.partialmethod( + compute_environments, from_which='top') + + compute_left_environments = functools.partialmethod( + compute_environments, from_which='left') + + compute_right_environments = functools.partialmethod( + compute_environments, from_which='right') + + def _compute_plaquette_environments_row_first( + self, + x_bsz, + y_bsz, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, + second_dense=None, + row_envs=None, + **compute_environment_opts + ): + if second_dense is None: + second_dense = x_bsz < 2 + + # first we contract from either side to produce column environments + if row_envs is None: + row_envs = self.compute_row_environments( + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) + + # next we form vertical strips and contract from both top and bottom + # for each column + col_envs = dict() + for i in range(self.Lx - x_bsz + 1): + # + # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● + # ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ + # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┬ + # | | | | | | | | | | | | | | | | | | | | ┊ x_bsz + # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┴ + # ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ + # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● + # + row_i = FermionTensorNetwork(( + row_envs['bottom', i], + *[row_envs['mid', i+x] for x in range(x_bsz)], + row_envs['top', i + x_bsz - 1], + )).view_as_(FermionTensorNetwork2D, like=self) + # + # y_bsz + # <--> second_dense=True + # ●── ──● + # │ │ ╭── ──╮ + # ●── . . ──● │╭─ . . ─╮│ ┬ + # │ │ or ● ● ┊ x_bsz + # ●── . . ──● │╰─ . . 
─╯│ ┴ + # │ │ ╰── ──╯ + # ●── ──● + # 'left' 'right' 'left' 'right' + # + col_envs[i] = row_i.compute_col_environments( + xrange=(max(i - 1, 0), min(i + x_bsz, self.Lx - 1)), + max_bond=max_bond, cutoff=cutoff, + canonize=canonize, layer_tags=layer_tags, + dense=second_dense, **compute_environment_opts) + + # then range through all the possible plaquettes, selecting the correct + # boundary tensors from either the column or row environments + plaquette_envs = dict() + for i0, j0 in product(range(self.Lx - x_bsz + 1), + range(self.Ly - y_bsz + 1)): + + # we want to select bordering tensors from: + # + # L──A──A──R <- A from the row environments + # │ │ │ │ + # i0+1 L──●──●──R + # │ │ │ │ <- L, R from the column environments + # i0 L──●──●──R + # │ │ │ │ + # L──B──B──R <- B from the row environments + # + # j0 j0+1 + # + env_ij = FermionTensorNetwork(( + col_envs[i0]['left', j0], + *[col_envs[i0]['mid', ix] for ix in range(j0, j0+y_bsz)], + col_envs[i0]['right', j0 + y_bsz - 1] + ), check_collisions=False) + + ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) + tid_lst = [] + for ij in ij_tags: + tid_lst += list(env_ij._get_tids_from_tags(ij)) + position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) + reorder_map = {i:j for i, j in zip(tid_lst, position)} + env_ij._reorder_from_tid(reorder_map, inplace=True) + plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij + + return plaquette_envs + + def _compute_plaquette_environments_col_first( + self, + x_bsz, + y_bsz, + max_bond=None, + cutoff=1e-10, + canonize=True, + layer_tags=None, + second_dense=None, + col_envs=None, + **compute_environment_opts + ): + if second_dense is None: + second_dense = y_bsz < 2 + + # first we contract from either side to produce column environments + if col_envs is None: + col_envs = self.compute_col_environments( + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, **compute_environment_opts) + + # next we form vertical strips and contract from both top and bottom + # for each column + row_envs = dict() + for j in range(self.Ly - y_bsz + 1): + # + # y_bsz + # <--> + # + # ╭─╱o─╱o─╮ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o|─o|──● + # ┃╭─|o─|o─╮┃ + # ●──o╱─o╱──● + # ┃╭─|o─|o─╮┃ + # ●──o╱─o╱──● + # + col_j = FermionTensorNetwork(( + col_envs['left', j], + *[col_envs['mid', j+jn] for jn in range(y_bsz)], + col_envs['right', j + y_bsz - 1], + )).view_as_(FermionTensorNetwork2D, like=self) + # + # y_bsz + # <--> second_dense=True + # ●──●──●──● ╭──●──╮ + # │ │ │ │ or │ ╱ ╲ │ 'top' + # . . . . ┬ + # ┊ x_bsz + # . . . . 
┴ + # │ │ │ │ or │ ╲ ╱ │ 'bottom' + # ●──●──●──● ╰──●──╯ + # + row_envs[j] = col_j.compute_row_environments( + yrange=(max(j - 1, 0), min(j + y_bsz, self.Ly - 1)), + max_bond=max_bond, cutoff=cutoff, canonize=canonize, + layer_tags=layer_tags, dense=second_dense, + **compute_environment_opts) + + # then range through all the possible plaquettes, selecting the correct + # boundary tensors from either the column or row environments + plaquette_envs = dict() + for i0, j0 in product(range(self.Lx - x_bsz + 1), + range(self.Ly - y_bsz + 1)): + + # we want to select bordering tensors from: + # + # A──A──A──A <- A from the row environments + # │ │ │ │ + # i0+1 L──●──●──R + # │ │ │ │ <- L, R from the column environments + # i0 L──●──●──R + # │ │ │ │ + # B──B──B──B <- B from the row environments + # + # j0 j0+1 + # + env_ij = FermionTensorNetwork(( + row_envs[j0]['bottom', i0], + *[row_envs[j0]['mid', ix] for ix in range(i0, i0+x_bsz)], + row_envs[j0]['top', i0 + x_bsz - 1] + ), check_collisions=False) + + ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) + tid_lst = [] + for ij in ij_tags: + tid_lst += list(env_ij._get_tids_from_tags(ij)) + position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) + reorder_map = {i:j for i, j in zip(tid_lst, position)} + env_ij._reorder_from_tid(reorder_map, inplace=True) + plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij + + return plaquette_envs + def gate_string_split_(TG, where, string, original_ts, bonds_along, reindex_map, site_ix, info, **compress_opts): # by default this means singuvalues are kept in the string 'blob' tensor @@ -51,7 +404,7 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, blob = tensor_contract(*contract_ts, TG, inplace=True) regauged = [] work_site = blob.get_fermion_info()[1] - fs = blob.fermion_owner[1]() + fs = blob.fermion_owner[0] # one by one extract the site tensors again from each end inner_ts = [None] * len(string) @@ -153,12 +506,11 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, # tensors that remain on the string sites and those pulled into string outer_ts, inner_ts = [], [] fermion_info = [] - fs = TG.fermion_owner[1]() + fs = TG.fermion_owner[0] tid_lst = [] for coo, rix, t in zip(string, inds_to_reduce, original_ts): - qpn_info = (t.data.dq, t.data.dq.__class__(0)) tq, tr = t.split(left_inds=None, right_inds=rix, - method='svd', get='tensors', absorb="right", qpn_info=qpn_info) + method='qr', get='tensors', absorb="right") fermion_info.append(t.get_fermion_info()) outer_ts.append(tq) inner_ts.append(tr.reindex_(reindex_map) if coo in where else tr) @@ -181,490 +533,96 @@ def gate_string_reduce_split_(TG, where, string, original_ts, bonds_along, # extract at beginning of string lix = bonds(blob, outer_ts[i]) if i == 0: - lix.add(site_ix[0]) - else: - lix.add(bonds_along[i - 1]) - - # the original bond we are restoring - bix = bonds_along[i] - - # split the blob! - lix = tuple(oset(blob.inds)-oset(lix)) - blob, *maybe_svals, inner_ts[i] = blob.split( - left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) - - # if singular values are returned (``absorb=None``) check if we should - # return them via ``info``, e.g. 
for ``SimpleUpdate` - if maybe_svals and info is not None: - s = next(iter(maybe_svals)).data - coo_pair = (string[i], string[i + 1]) - info['singular_values', coo_pair] = s - - # regauge the blob but record so as to unguage later - if i != j - 1: - blob.multiply_index_diagonal_(bix, s, location="back") - regauged.append((i + 1, bix, "back", s)) - - # move inwards along string, terminate if two ends meet - i += 1 - if i == j: - inner_ts[i] = blob - break - - # extract at end of string - lix = bonds(blob, outer_ts[j]) - if j == len(string) - 1: - lix.add(site_ix[-1]) - else: - lix.add(bonds_along[j]) - - # the original bond we are restoring - bix = bonds_along[j - 1] - - # split the blob! - inner_ts[j], *maybe_svals, blob = blob.split( - left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) - # if singular values are returned (``absorb=None``) check if we should - # return them via ``info``, e.g. for ``SimpleUpdate` - if maybe_svals and info is not None: - s = next(iter(maybe_svals)).data - coo_pair = (string[j - 1], string[j]) - info['singular_values', coo_pair] = s - - # regauge the blob but record so as to unguage later - if j != i + 1: - blob.multiply_index_diagonal_(bix, s, location="front") - regauged.append((j - 1, bix, "front", s)) - - # move inwards along string, terminate if two ends meet - j -= 1 - if j == i: - inner_ts[j] = blob - break - - for i, (tid, _) in enumerate(fermion_info): - if i==0: - fs.replace_tensor(work_site, inner_ts[i], tid=tid, virtual=True) - else: - fs.insert_tensor(work_site+i, inner_ts[i], tid=tid, virtual=True) - new_ts = [ - tensor_contract(ts, tr, inplace=True).transpose_like_(to) - for to, ts, tr in zip(original_ts, outer_ts, inner_ts) - ] - - for i, bix, location, s in regauged: - snew = inv_with_smudge(s, INVERSE_CUTOFF, gauge_smudge=0) - t = new_ts[i] - t.multiply_index_diagonal_(bix, snew, location=location) - - for (tid, _), to, t in zip(fermion_info, original_ts, new_ts): - site = t.get_fermion_info()[1] - to.modify(data=t.data) - fs.replace_tensor(site, to, tid=tid, virtual=True) - - fs._reorder_from_dict(dict(fermion_info)) - -class FermionTensorNetwork2D(FermionTensorNetwork,TensorNetwork2D): - - def _compatible_2d(self, other): - """Check whether ``self`` and ``other`` are compatible 2D tensor - networks such that they can remain a 2D tensor network when combined. 
- """ - return ( - isinstance(other, FermionTensorNetwork2D) and - all(getattr(self, e) == getattr(other, e) - for e in FermionTensorNetwork2D._EXTRA_PROPS) - ) - - def __and__(self, other): - new = super().__and__(other) - if self._compatible_2d(other): - new.view_as_(FermionTensorNetwork2D, like=self) - return new - - def __or__(self, other): - new = super().__or__(other) - if self._compatible_2d(other): - new.view_as_(FermionTensorNetwork2D, like=self) - return new - - def flatten(self, fuse_multibonds=True, inplace=False): - raise NotImplementedError - - def compute_row_environments( - self, - max_bond=None, - cutoff=1e-10, - canonize=True, - layer_tags=None, - dense=False, - compress_opts=None, - **contract_boundary_opts - ): - contract_boundary_opts['max_bond'] = max_bond - contract_boundary_opts['cutoff'] = cutoff - contract_boundary_opts['canonize'] = canonize - contract_boundary_opts['layer_tags'] = layer_tags - contract_boundary_opts['compress_opts'] = compress_opts - - if compress_opts is not None: - reorder_tags = compress_opts.pop("reorder_tags", layer_tags) - else: - reorder_tags = layer_tags - env_bottom = self.reorder_right_row(layer_tags=reorder_tags) - env_top = env_bottom.copy() - - row_envs = dict() - - # upwards pass - row_envs['below', 0] = FermionTensorNetwork([]) - first_row = self.row_tag(0) - row_envs['mid', 0] = env_bottom.select(first_row).copy() - row_envs['above', self.Lx - 1] = FermionTensorNetwork([]) - if self.Lx == 1: - return row_envs - if dense: - env_bottom ^= first_row - else: - for j in range(self.Ly): - env_bottom ^= self.site_tag(0, j) - env_bottom.compress_row(0, sweep="right", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) - - row_envs['below', 1] = env_bottom.select(first_row).copy() - for i in range(2, env_bottom.Lx): - below_row = env_bottom.row_tag(i-1) - row_envs["mid", i-1] = env_bottom.select(below_row).copy() - if dense: - env_bottom ^= (self.row_tag(i - 2), self.row_tag(i - 1)) - else: - env_bottom.contract_boundary_from_bottom_( - (i - 2, i - 1), **contract_boundary_opts) - row_envs['below', i] = env_bottom.select(first_row).copy() - - last_row = env_bottom.row_tag(self.Lx-1) - row_envs['mid', self.Lx-1] = env_bottom.select(last_row).copy() - # downwards pass - last_row = self.row_tag(self.Lx - 1) - if dense: - env_top ^= last_row - else: - for j in range(self.Ly): - env_top ^= self.site_tag(self.Lx-1, j) - env_top.compress_row(self.Lx-1, sweep="right", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) - - row_envs['above', self.Lx - 2] = env_top.select(last_row).copy() - for i in range(env_top.Lx - 3, -1, -1): - if dense: - env_top ^= (self.row_tag(i + 1), self.row_tag(i + 2)) - else: - env_top.contract_boundary_from_top_( - (i + 1, i + 2), **contract_boundary_opts) - row_envs['above', i] = env_top.select(last_row).copy() - - return row_envs - - def compute_col_environments( - self, - max_bond=None, - cutoff=1e-10, - canonize=True, - layer_tags=None, - dense=False, - compress_opts=None, - **contract_boundary_opts - ): - contract_boundary_opts['max_bond'] = max_bond - contract_boundary_opts['cutoff'] = cutoff - contract_boundary_opts['canonize'] = canonize - contract_boundary_opts['layer_tags'] = layer_tags - contract_boundary_opts['compress_opts'] = compress_opts - - if compress_opts is not None: - reorder_tags = compress_opts.pop("reorder_tags", layer_tags) - else: - reorder_tags = layer_tags - env_left = self.reorder_upward_column(layer_tags=reorder_tags) - env_right = env_left.copy() - col_envs = dict() - 
- # upwards pass - col_envs['left', 0] = FermionTensorNetwork([]) - first_col = self.col_tag(0) - col_envs['mid', 0] = env_left.select(first_col).copy() - col_envs['right', self.Ly - 1] = FermionTensorNetwork([]) - if self.Ly == 1: - return col_envs - - if dense: - env_left ^= first_col - else: - for i in range(self.Lx): - env_left ^= self.site_tag(i, 0) - env_left.compress_column(0, sweep="up", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) - col_envs['left', 1] = env_left.select(first_col).copy() - - for i in range(2, env_left.Ly): - left_col = env_left.col_tag(i-1) - col_envs["mid", i-1] = env_left.select(left_col).copy() - if dense: - env_left ^= (self.col_tag(i - 2), self.col_tag(i - 1)) - else: - env_left.contract_boundary_from_left_( - (i - 2, i - 1), **contract_boundary_opts) - col_envs['left', i] = env_left.select(first_col).copy() - - last_col = env_left.col_tag(self.Ly-1) - col_envs['mid', self.Ly-1] = env_left.select(last_col).copy() - # downwards pass - last_col = self.col_tag(self.Ly - 1) - if dense: - env_right ^= last_col - else: - for i in range(self.Lx): - env_right ^= self.site_tag(i, self.Ly-1) - env_right.compress_column(self.Ly-1, sweep="up", max_bond=max_bond, cutoff=cutoff, compress_opts=compress_opts) - col_envs['right', self.Ly - 2] = env_right.select(last_col).copy() - for i in range(env_right.Ly - 3, -1, -1): - if dense: - env_right ^= (self.col_tag(i + 1), self.col_tag(i + 2)) - else: - env_right.contract_boundary_from_right_( - (i + 1, i + 2), **contract_boundary_opts) - col_envs['right', i] = env_right.select(last_col).copy() - - return col_envs - - def _compute_plaquette_environments_row_first( - self, - x_bsz, - y_bsz, - max_bond=None, - cutoff=1e-10, - canonize=True, - layer_tags=None, - second_dense=None, - row_envs=None, - **compute_environment_opts - ): - if second_dense is None: - second_dense = x_bsz < 2 - - # first we contract from either side to produce column environments - if row_envs is None: - row_envs = self.compute_row_environments( - max_bond=max_bond, cutoff=cutoff, canonize=canonize, - layer_tags=layer_tags, **compute_environment_opts) - - # next we form vertical strips and contract from both top and bottom - # for each column - col_envs = dict() - for i in range(self.Lx - x_bsz + 1): - # - # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● - # ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ - # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┬ - # | | | | | | | | | | | | | | | | | | | | ┊ x_bsz - # o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o─o ┴ - # ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ ╲ ╱ - # ●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━●━━━● - # - row_i = FermionTensorNetwork(( - row_envs['below', i], - *[row_envs['mid', i+x] for x in range(x_bsz)], - row_envs['above', i + x_bsz - 1], - ), check_collisions=False).view_as_(FermionTensorNetwork2D, like=self) - # - # y_bsz - # <--> second_dense=True - # ●── ──● - # │ │ ╭── ──╮ - # ●── . . ──● │╭─ . . ─╮│ ┬ - # │ │ or ● ● ┊ x_bsz - # ●── . . ──● │╰─ . . 
─╯│ ┴ - # │ │ ╰── ──╯ - # ●── ──● - # 'left' 'right' 'left' 'right' - # - col_envs[i] = row_i.compute_col_environments( - xrange=(max(i - 1, 0), min(i + x_bsz, self.Lx - 1)), - dense=second_dense, max_bond=max_bond, cutoff=cutoff, canonize=canonize, - layer_tags=layer_tags, **compute_environment_opts) - - plaquette_envs = dict() - for i0, j0 in product(range(self.Lx - x_bsz + 1), - range(self.Ly - y_bsz + 1)): - - # we want to select bordering tensors from: - # - # L──A──A──R <- A from the row environments - # │ │ │ │ - # i0+1 L──●──●──R - # │ │ │ │ <- L, R from the column environments - # i0 L──●──●──R - # │ │ │ │ - # L──B──B──R <- B from the row environments - # - # j0 j0+1 - # - env_ij = FermionTensorNetwork(( - col_envs[i0]['left', j0], - *[col_envs[i0]['mid', ix] for ix in range(j0, j0+y_bsz)], - col_envs[i0]['right', j0 + y_bsz - 1] - ), check_collisions=False) - - ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) - tid_lst = [] - for ij in ij_tags: - tid_lst += list(env_ij._get_tids_from_tags(ij)) - position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) - reorder_map = {i:j for i, j in zip(tid_lst, position)} - env_ij._reorder_from_tid(reorder_map, inplace=True) - plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij - - return plaquette_envs - - def _compute_plaquette_environments_col_first( - self, - x_bsz, - y_bsz, - max_bond=None, - cutoff=1e-10, - canonize=True, - layer_tags=None, - second_dense=None, - col_envs=None, - **compute_environment_opts - ): - if second_dense is None: - second_dense = y_bsz < 2 + lix.add(site_ix[0]) + else: + lix.add(bonds_along[i - 1]) - # first we contract from either side to produce column environments - if col_envs is None: - col_envs = self.compute_col_environments( - max_bond=max_bond, cutoff=cutoff, canonize=canonize, - layer_tags=layer_tags, **compute_environment_opts) + # the original bond we are restoring + bix = bonds_along[i] - # next we form vertical strips and contract from both top and bottom - # for each column - row_envs = dict() - for j in range(self.Ly - y_bsz + 1): - # - # y_bsz - # <--> - # - # ╭─╱o─╱o─╮ - # ●──o|─o|──● - # ┃╭─|o─|o─╮┃ - # ●──o|─o|──● - # ┃╭─|o─|o─╮┃ - # ●──o|─o|──● - # ┃╭─|o─|o─╮┃ - # ●──o╱─o╱──● - # ┃╭─|o─|o─╮┃ - # ●──o╱─o╱──● - # - col_j = FermionTensorNetwork(( - col_envs['left', j], - *[col_envs['mid', j+y] for y in range(y_bsz)], - col_envs['right', j + y_bsz - 1], - ), check_collisions=False).view_as_(FermionTensorNetwork2D, like=self) - # - # y_bsz - # <--> second_dense=True - # ●──●──●──● ╭──●──╮ - # │ │ │ │ or │ ╱ ╲ │ 'above' - # . . . . ┬ - # ┊ x_bsz - # . . . . ┴ - # │ │ │ │ or │ ╲ ╱ │ 'below' - # ●──●──●──● ╰──●──╯ - # - row_envs[j] = col_j.compute_row_environments( - yrange=(max(j - 1, 0), min(j + y_bsz, self.Ly - 1)), - dense=second_dense, max_bond=max_bond, cutoff=cutoff, canonize=canonize, - layer_tags=layer_tags, **compute_environment_opts) + # split the blob! + lix = tuple(oset(blob.inds)-oset(lix)) + blob, *maybe_svals, inner_ts[i] = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) - # then range through all the possible plaquettes, selecting the correct - # boundary tensors from either the column or row environments - plaquette_envs = dict() - for i0, j0 in product(range(self.Lx - x_bsz + 1), - range(self.Ly - y_bsz + 1)): + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. 
for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[i], string[i + 1]) + info['singular_values', coo_pair] = s + # regauge the blob but record so as to unguage later + if i != j - 1: + blob.multiply_index_diagonal_(bix, s, location="back") + regauged.append((i + 1, bix, "back", s)) - env_ij = FermionTensorNetwork(( - row_envs[j0]['below', i0], - *[row_envs[j0]['mid', ix] for ix in range(i0, i0+x_bsz)], - row_envs[j0]['above', i0 + x_bsz - 1] - ), check_collisions=False) + # move inwards along string, terminate if two ends meet + i += 1 + if i == j: + inner_ts[i] = blob + break - ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) - tid_lst = [] - for ij in ij_tags: - tid_lst += list(env_ij._get_tids_from_tags(ij)) - position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) - reorder_map = {i:j for i, j in zip(tid_lst, position)} - env_ij._reorder_from_tid(reorder_map, inplace=True) + # extract at end of string + lix = bonds(blob, outer_ts[j]) + if j == len(string) - 1: + lix.add(site_ix[-1]) + else: + lix.add(bonds_along[j]) - plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij + # the original bond we are restoring + bix = bonds_along[j - 1] - return plaquette_envs + # split the blob! + inner_ts[j], *maybe_svals, blob = blob.split( + left_inds=lix, get='tensors', bond_ind=bix, **compress_opts) + # if singular values are returned (``absorb=None``) check if we should + # return them via ``info``, e.g. for ``SimpleUpdate` + if maybe_svals and info is not None: + s = next(iter(maybe_svals)).data + coo_pair = (string[j - 1], string[j]) + info['singular_values', coo_pair] = s + # regauge the blob but record so as to unguage later + if j != i + 1: + blob.multiply_index_diagonal_(bix, s, location="front") + regauged.append((j - 1, bix, "front", s)) - def reorder(self, direction="ru", layer_tags=None, inplace=False): - Lx, Ly = self._Lx, self._Ly - row_wise = (direction[0] in ["r", "l"]) - iter_dic = {"u": range(Lx), - "d": range(Lx)[::-1], - "r": range(Ly), - "l": range(Ly)[::-1]} - iterator = product(iter_dic[direction[1]], iter_dic[direction[0]]) - position = 0 - tid_map = dict() - for i, j in iterator: - x, y = (i, j) if row_wise else (j, i) - site_tag = self.site_tag(x, y) - tid = self._get_tids_from_tags(site_tag) - if len(tid)==1: - tid, = tid - if tid not in tid_map: - tid_map[tid] = position - position += 1 - else: - if layer_tags is None: - _tags = [self.tensor_map[ix].tags for ix in tid] - _tmp_tags = _tags[0].copy() - for itag in _tags[1:]: - _tmp_tags &= itag - _layer_tags = sorted([list(i-_tmp_tags)[0] for i in _tags]) - else: - _layer_tags = layer_tags - for tag in _layer_tags: - tid, = self._get_tids_from_tags((site_tag, tag)) - if tid not in tid_map: - tid_map[tid] = position - position += 1 + # move inwards along string, terminate if two ends meet + j -= 1 + if j == i: + inner_ts[j] = blob + break - return self._reorder_from_tid(tid_map, inplace) + for i, (tid, _) in enumerate(fermion_info): + if i==0: + fs.replace_tensor(work_site, inner_ts[i], tid=tid, virtual=True) + else: + fs.insert_tensor(work_site+i, inner_ts[i], tid=tid, virtual=True) + new_ts = [ + tensor_contract(ts, tr, inplace=True).transpose_like_(to) + for to, ts, tr in zip(original_ts, outer_ts, inner_ts) + ] - def reorder_upward_column(self, direction="right", layer_tags=None, inplace=False): - direction = "u" + direction[0] - return self.reorder(direction=direction, layer_tags=layer_tags, 
inplace=inplace) + for i, bix, location, s in regauged: + snew = inv_with_smudge(s, INVERSE_CUTOFF, gauge_smudge=0) + t = new_ts[i] + t.multiply_index_diagonal_(bix, snew, location=location) - def reorder_downward_column(self, direction="right", layer_tags=None, inplace=False): - direction = "d" + direction[0] - return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) + for (tid, _), to, t in zip(fermion_info, original_ts, new_ts): + site = t.get_fermion_info()[1] + to.modify(data=t.data) + fs.replace_tensor(site, to, tid=tid, virtual=True) - def reorder_right_row(self, direction="upward", layer_tags=None, inplace=False): - direction = "r" + direction[0] - return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) + fs._reorder_from_dict(dict(fermion_info)) - def reorder_left_row(self, direction="upward", layer_tags=None, inplace=False): - direction = "l" + direction[0] - return self.reorder(direction=direction, layer_tags=layer_tags, inplace=inplace) class FermionTensorNetwork2DVector(FermionTensorNetwork2D, FermionTensorNetwork, TensorNetwork2DVector): + """Mixin class for a 2D square lattice vector TN, i.e. one with a single + physical index per site. + """ _EXTRA_PROPS = ( '_site_tag_id', @@ -678,24 +636,12 @@ class FermionTensorNetwork2DVector(FermionTensorNetwork2D, def to_dense(self, *inds_seq, **contract_opts): raise NotImplementedError - def make_norm( self, mangle_append='*', layer_tags=('KET', 'BRA'), return_all=False, ): - """Make the norm tensor network of this 2D vector. - - Parameters - ---------- - mangle_append : {str, False or None}, optional - How to mangle the inner indices of the bra. - layer_tags : (str, str), optional - The tags to identify the top and bottom. - return_all : bool, optional - Return the norm, the ket and the bra. 
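        A typical call, as exercised in the accompanying tests (a sketch only,
        assuming ``psi`` is a fermionic 2D vector network)::

            norm = psi.make_norm()     # <psi|psi> as a layered 2D network
            exact = norm.contract(all, optimize='auto-hq')
            approx = norm.contract_boundary(max_bond=6, layer_tags=('KET', 'BRA'))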
- """ ket = self.copy() ket.add_tag(layer_tags[0]) @@ -771,7 +717,7 @@ def gate( site_tids = psi._get_tids_from_inds(bnds, which='any') # pop the sites, contract, then re-add - pts = [psi._pop_tensor_(tid) for tid in site_tids] + pts = [psi._pop_tensor(tid) for tid in site_tids] out = tensor_contract(*pts, TG, inplace=True) psi.fermion_space.move(out.get_fermion_info()[0], min(isite)) psi |= out @@ -839,16 +785,28 @@ def gate( def compute_local_expectation( self, terms, + max_bond=None, + *, + cutoff=1e-10, + canonize=True, + mode='mps', + layer_tags=('KET', 'BRA'), normalized=False, autogroup=True, - contract_optimize='greedy', + contract_optimize='auto-hq', return_all=False, - layer_tags=('KET', 'BRA'), plaquette_envs=None, plaquette_map=None, **plaquette_env_options, ): norm, ket, bra = self.make_norm(return_all=True, layer_tags=layer_tags) + plaquette_env_options["max_bond"] = max_bond + plaquette_env_options["cutoff"] = cutoff + plaquette_env_options["canonize"] = canonize + plaquette_env_options["mode"] = mode + plaquette_env_options["layer_tags"] = layer_tags + + # factorize both local and global phase on the operator tensors new_terms = dict() for where, op in terms.items(): if is_lone_coo(where): @@ -862,9 +820,6 @@ def compute_local_expectation( new_terms[where] = bra.fermion_space.move_past(TG).data if plaquette_envs is None: - # set some sensible defaults - plaquette_env_options.setdefault('layer_tags', ('KET', 'BRA')) - plaquette_envs = dict() for x_bsz, y_bsz in calc_plaquette_sizes(terms.keys(), autogroup): plaquette_envs.update(norm.compute_plaquette_environments( @@ -883,7 +838,6 @@ def compute_local_expectation( expecs = dict() for p in plaq2coo: # site tags for the plaquette - # view the ket portion as 2d vector so we can gate it tn = plaquette_envs[p] if normalized: norm_i0j0 = tn.contract(all, optimize=contract_optimize) @@ -920,20 +874,49 @@ def compute_local_expectation( return functools.reduce(add, (e for e, _ in expecs.values())) - def normalize( - self, - balance_bonds=False, - equalize_norms=False, - inplace=False, - **boundary_contract_opts, - ): +class FermionTensorNetwork2DOperator(FermionTensorNetwork2D, + FermionTensorNetwork, + TensorNetwork2DOperator): + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_upper_ind_id', + '_lower_ind_id', + ) + + def to_dense(self, *inds_seq, **contract_opts): + raise NotImplementedError + + +class FermionTensorNetwork2DFlat(FermionTensorNetwork2D, + FermionTensorNetwork, + TensorNetwork2DFlat): + """Mixin class for a 2D square lattice tensor network with a single tensor + per site, for example, both PEPS and PEPOs. 
+ """ + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + ) + + def expand_bond_dimension(self, new_bond_dim, inplace=True, bra=None, + rand_strength=0.0): raise NotImplementedError + class FPEPS(FermionTensorNetwork2DVector, + FermionTensorNetwork2DFlat, FermionTensorNetwork2D, - PEPS, - TensorNetwork2DFlat): - + FermionTensorNetwork, + PEPS): _EXTRA_PROPS = ( '_site_tag_id', @@ -946,8 +929,7 @@ class FPEPS(FermionTensorNetwork2DVector, def __init__(self, arrays, *, shape='urdlp', tags=None, site_ind_id='k{},{}', site_tag_id='I{},{}', - row_tag_id='ROW{}', col_tag_id='COL{}', - order_iterator=None, **tn_opts): + row_tag_id='ROW{}', col_tag_id='COL{}', **tn_opts): if isinstance(arrays, FPEPS): super().__init__(arrays) @@ -967,9 +949,7 @@ def __init__(self, arrays, *, shape='urdlp', tags=None, # cache for both creating and retrieving indices ix = defaultdict(rand_uuid) - if order_iterator is None: - order_iterator = product(range(self.Lx), range(self.Ly)) - for i, j in order_iterator: + for i, j in product(range(self.Lx), range(self.Ly)): array = arrays[i][j] # figure out if we need to transpose the arrays from some order @@ -985,15 +965,14 @@ def __init__(self, arrays, *, shape='urdlp', tags=None, array_order = array_order.replace('l', '') # allow convention of missing bonds to be singlet dimensions - if array.ndim != len(array_order): - raise ValueError("array shape not matching array order") + if len(array.shape) != len(array_order): + raise TypeError("input array does not ahve right shape of (Lx, Ly)") transpose_order = tuple( array_order.find(x) for x in 'urdlp' if x in array_order ) - if transpose_order != tuple(range(len(array_order))): - array = array.transpose(transpose_order) + array = np.transpose(array, transpose_order) # get the relevant indices corresponding to neighbours inds = [] @@ -1012,51 +991,114 @@ def __init__(self, arrays, *, shape='urdlp', tags=None, ij_tags = tags | oset((self.site_tag(i, j), self.row_tag(i), self.col_tag(j))) + # create the site tensor! tensors.append(FermionTensor(data=array, inds=inds, tags=ij_tags)) - super().__init__(tensors, check_collisions=False, **tn_opts) + + super().__init__(tensors, virtual=True, **tn_opts) @classmethod - def rand(cls, Lx, Ly, bond_dim, qpn=None, phys_dim=1, - dtype=float, seed=None, shape="urdlp", qpn_map=None, - **peps_opts): - """Create a random (un-normalized) PEPS. - - Parameters - ---------- - Lx : int - The number of rows. - Ly : int - The number of columns. - bond_dim : int - The bond dimension. - physical : int, optional - The physical index dimension. - dtype : dtype, optional - The dtype to create the arrays with, default is real double. - seed : int, optional - A random seed. - parity: int or int array of (0,1), optional - parity for each site, default is random parity for all sites - peps_opts - Supplied to :class:`~quimb.tensor.tensor_2d.PEPS`. 
- - Returns - ------- - psi : PEPS - """ + def rand(cls, Lx, Ly, bond_dim, phys_dim=2, + dtype=float, seed=None, **peps_opts): + raise NotImplementedError + + def add_PEPS(self, other, inplace=False): + raise NotImplementedError + +class FPEPO(FermionTensorNetwork2DOperator, + FermionTensorNetwork2DFlat, + FermionTensorNetwork2D, + FermionTensorNetwork, + PEPO): + + _EXTRA_PROPS = ( + '_site_tag_id', + '_row_tag_id', + '_col_tag_id', + '_Lx', + '_Ly', + '_upper_ind_id', + '_lower_ind_id', + ) + + def __init__(self, arrays, *, shape='urdlbk', tags=None, + upper_ind_id='k{},{}', lower_ind_id='b{},{}', + site_tag_id='I{},{}', row_tag_id='ROW{}', col_tag_id='COL{}', + **tn_opts): + + if isinstance(arrays, FPEPO): + super().__init__(arrays) + return + + tags = tags_to_oset(tags) + self._upper_ind_id = upper_ind_id + self._lower_ind_id = lower_ind_id + self._site_tag_id = site_tag_id + self._row_tag_id = row_tag_id + self._col_tag_id = col_tag_id + + arrays = tuple(tuple(x for x in xs) for xs in arrays) + self._Lx = len(arrays) + self._Ly = len(arrays[0]) + tensors = [] + + # cache for both creating and retrieving indices + ix = defaultdict(rand_uuid) + + for i, j in product(range(self.Lx), range(self.Ly)): + array = arrays[i][j] + + # figure out if we need to transpose the arrays from some order + # other than up right down left physical + array_order = shape + if i == self.Lx - 1: + array_order = array_order.replace('u', '') + if j == self.Ly - 1: + array_order = array_order.replace('r', '') + if i == 0: + array_order = array_order.replace('d', '') + if j == 0: + array_order = array_order.replace('l', '') + + # allow convention of missing bonds to be singlet dimensions + if len(array.shape) != len(array_order): + raise ValueError("Input arrays do not have right shape (Lx, Ly)") + + transpose_order = tuple( + array_order.find(x) for x in 'urdlbk' if x in array_order + ) + if transpose_order != tuple(range(len(array_order))): + array = np.transpose(array, transpose_order) + + # get the relevant indices corresponding to neighbours + inds = [] + if 'u' in array_order: + inds.append(ix[(i + 1, j), (i, j)]) + if 'r' in array_order: + inds.append(ix[(i, j), (i, j + 1)]) + if 'd' in array_order: + inds.append(ix[(i, j), (i - 1, j)]) + if 'l' in array_order: + inds.append(ix[(i, j - 1), (i, j)]) + inds.append(self.lower_ind(i, j)) + inds.append(self.upper_ind(i, j)) + + # mix site, row, column and global tags + ij_tags = tags | oset((self.site_tag(i, j), + self.row_tag(i), + self.col_tag(j))) + + # create the site tensor! + tensors.append(FermionTensor(data=array, inds=inds, tags=ij_tags)) + + super().__init__(tensors, virtual=True, **tn_opts) + + @classmethod + def rand(cls, Lx, Ly, bond_dim, phys_dim=2, herm=False, + dtype=float, seed=None, **pepo_opts): raise NotImplementedError - if seed is not None: - np.random.seed(seed) - if qpn is None: qpn = (Lx*Ly, Lx*Ly%2) - from pyblock3.algebra import fermion_gen - if qpn_map is None: - distribution = peps_opts.pop("distribution", "even") - qpn_map = fermion_gen._gen_2d_qpn_map(Lx, Ly, qpn, distribution) - - if Lx >= Ly: - arrays = fermion_gen._qpn_map_to_col_skeleton(qpn_map, phys_dim, bond_dim, shape) - else: - arrays = fermion_gen._qpn_map_to_row_skeleton(qpn_map, phys_dim, bond_dim, shape) - return cls(arrays, **peps_opts) + def add_PEPO(self, other, inplace=False): + """Add this PEPO with another. 
+ """ + raise NotImplementedError diff --git a/quimb/tensor/fermion_gen.py b/quimb/tensor/fermion_gen.py index e44ca6ee..2dbf0b60 100644 --- a/quimb/tensor/fermion_gen.py +++ b/quimb/tensor/fermion_gen.py @@ -8,8 +8,11 @@ pattern_map = {"d": "+", "l":"+", "p":"+", "u": "-", "r":"-"} -def _gen_site_tsr(state, pattern=None, ndim=2, ax=0, symmetry=None): - if symmetry is None: symmetry = bitf.DEFAULT_SYMMETRY +def _gen_site_tsr(state, pattern=None, ndim=2, ax=0, symmetry=None, use_cpp=None): + if symmetry is None: + symmetry = bitf.dispatch_settings("symmetry") + if use_cpp is None: + use_cpp = bitf.dispatch_settings("use_cpp") state_map = fermion_encoding.get_state_map(symmetry) if state not in state_map: raise KeyError("requested state not recoginized") @@ -22,12 +25,13 @@ def _gen_site_tsr(state, pattern=None, ndim=2, ax=0, symmetry=None): dat[ind] = 1 blocks = [SubTensor(reduced=dat, q_labels=q_label)] T = SparseFermionTensor(blocks=blocks, pattern=pattern) - if bitf.USE_CPP: + if use_cpp: T = T.to_flat() return T -def gen_mf_peps(state_array, shape='urdlp', symmetry=None, **kwargs): - if symmetry is None: symmetry = bitf.DEFAULT_SYMMETRY +def gen_mf_peps(state_array, shape='urdlp', symmetry=None, use_cpp=None, **kwargs): + if symmetry is None: + symmetry = bitf.dispatch_settings("symmetry") Lx, Ly = state_array.shape arr = state_array.astype("int") cache = dict() @@ -47,7 +51,7 @@ def _gen_ij(i, j): ax = array_order.index('p') key = (state, ndim, ax, pattern) if key not in cache: - cache[key] = _gen_site_tsr(state, pattern, ndim, ax, symmetry).copy() + cache[key] = _gen_site_tsr(state, pattern, ndim, ax, symmetry, use_cpp).copy() return cache[key] tsr_array = [[_gen_ij(i,j) for j in range(Ly)] for i in range(Lx)] diff --git a/quimb/tensor/tensor_block.py b/quimb/tensor/tensor_block.py new file mode 100644 index 00000000..8b38b83a --- /dev/null +++ b/quimb/tensor/tensor_block.py @@ -0,0 +1,583 @@ +"""Core tensor network tools. 
+""" +import os +import copy +import functools + +import numpy as np + +from ..utils import (check_opt, oset) +from .drawing import draw_tn + +from .tensor_core import Tensor, TensorNetwork, _parse_split_opts, oset_union, tags_to_oset, rand_uuid, _parse_split_opts +from .tensor_core import tensor_contract as _tensor_contract +from .block_tools import apply, get_smudge_balance +from .block_interface import dispatch_settings + +# --------------------------------------------------------------------------- # +# Tensor Funcs # +# --------------------------------------------------------------------------- # + +def _core_contract(T1, T2): + conc = [ind for ind in T1.inds if ind in T2.inds] + ax1 = [T1.inds.index(ind) for ind in conc] + ax2 = [T2.inds.index(ind) for ind in conc] + o_array = np.tensordot(T1.data, T2.data, (ax1, ax2)) + o_ix = tuple([ind for ind in T1.inds+T2.inds if ind not in conc]) + o_tags = oset.union(T1.tags, T2.tags) + if len(o_ix) == 0: + return o_array + else: + return T1.__class__(data=o_array, inds=o_ix, tags=o_tags) + +def tensor_contract(*tensors, output_inds=None, **contract_opts): + path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) + tensors = list(tensors) + for conc in path_info.contraction_list: + pos1, pos2 = sorted(conc[0]) + T2 = tensors.pop(pos2) + T1 = tensors.pop(pos1) + out = _core_contract(T1, T2) + tensors.append(out) + + if not isinstance(out, (float, complex)): + _output_inds = out.inds + if output_inds is None: + output_inds = _output_inds + else: + output_inds = tuple(output_inds) + if output_inds!=_output_inds: + out.transpose_(*output_inds) + return out + +def tensor_split( + T, + left_inds, + method='svd', + get=None, + absorb='both', + max_bond=None, + cutoff=1e-10, + cutoff_mode='rel', + renorm=None, + ltags=None, + rtags=None, + stags=None, + bond_ind=None, + right_inds=None, + qpn_info = None, +): + if left_inds is None: + left_inds = oset(T.inds) - oset(right_inds) + else: + left_inds = tags_to_oset(left_inds) + + if right_inds is None: + right_inds = oset(T.inds) - oset(left_inds) + + _left_inds = [T.inds.index(ind) for ind in left_inds] + _right_inds = [T.inds.index(ind) for ind in right_inds] + + if get == 'values': + raise NotImplementedError + + opts = _parse_split_opts( + method, cutoff, absorb, max_bond, cutoff_mode, renorm) + + # ``s`` itself will be None unless ``absorb=None`` is specified + if method == "svd": + left, s, right = T.data.tensor_svd(_left_inds, right_idx=_right_inds, **opts) + elif method == "qr": + mod = {"right":"qr", "left":"lq"}[absorb] + s = None + left, right = T.data.tensor_qr(_left_inds, right_idx=_right_inds, mod=mod) + else: + raise NotImplementedError + + + if get == 'arrays': + if absorb is None: + return left, s, right + return left, right + + if bond_ind is None: + if absorb is None: + bond_ind = (rand_uuid(), rand_uuid()) + else: + bond_ind = (rand_uuid(),) + else: + if absorb is None: + if isinstance(bond_ind, str): + bond_ind = (bond_ind, rand_uuid()) + else: + if len(bond_ind) != 2: + raise ValueError("for absorb=None, bond_ind must be a tuple/list of two strings") + else: + if isinstance(bond_ind, str): + bond_ind = (bond_ind, ) + + ltags = T.tags | tags_to_oset(ltags) + rtags = T.tags | tags_to_oset(rtags) + + Tl = T.__class__(data=left, inds=(*left_inds, bond_ind[0]), tags=ltags) + Tr = T.__class__(data=right, inds=(bond_ind[-1], *right_inds), tags=rtags) + + if absorb is None: + stags = T.tags | tags_to_oset(stags) + Ts = T.__class__(data=s, inds=bond_ind, tags=stags) + 
tensors = (Tl, Ts, Tr) + else: + tensors = (Tl, Tr) + + if get == 'tensors': + return tensors + + return BlockTensorNetwork(tensors, check_collisions=False) + +def tensor_canonize_bond(T1, T2, absorb='right', **split_opts): + check_opt('absorb', absorb, ('left', 'both', 'right')) + + if absorb == 'both': + split_opts.setdefault('cutoff', 0.0) + return tensor_compress_bond(T1, T2, **split_opts) + + split_opts.setdefault('method', 'qr') + shared_ix, left_env_ix = T1.filter_bonds(T2) + + if absorb == "right": + new_T1, tRfact = T1.split(left_env_ix, get='tensors', absorb=absorb, **split_opts) + new_T2 = T2.contract(tRfact) + else: + tLfact, new_T2 = T2.split(shared_ix, get="tensors", absorb=absorb, **split_opts) + new_T1 = T1.contract(tLfact) + + T1.modify(data=new_T1.data, inds=new_T1.inds) + T2.modify(data=new_T2.data, inds=new_T2.inds) + +def tensor_compress_bond( + T1, + T2, + reduced=True, + absorb='both', + info=None, + **compress_opts +): + shared_ix, left_env_ix = T1.filter_bonds(T2) + if not shared_ix: + raise ValueError("The tensors specified don't share an bond.") + + if reduced: + # a) -> b) + T1_L, T1_R = T1.split(left_inds=left_env_ix, right_inds=shared_ix, absorb="right", + get='tensors', method='qr') + T2_L, T2_R = T2.split(left_inds=shared_ix, absorb="left", get='tensors', method='qr') + # b) -> c) + M = (T1_R @ T2_L) + M.drop_tags() + # c) -> d) + M_L, *s, M_R = M.split(left_inds=T1_L.bonds(M), get='tensors', + absorb=absorb, **compress_opts) + + # make sure old bond being used + ns_ix, = M_L.bonds(M_R) + M_L.reindex_({ns_ix: shared_ix[0]}) + M_R.reindex_({ns_ix: shared_ix[0]}) + + # d) -> e) + T1C = T1_L.contract(M_L) + T2C = M_R.contract(T2_R) + else: + T12 = T1 @ T2 + T1C, *s, T2C = T12.split(left_inds=left_env_ix, get='tensors', + absorb=absorb, **compress_opts) + T1C.transpose_like_(T1) + T2C.transpose_like_(T2) + + # update with the new compressed data + T1.modify(data=T1C.data, inds=T1C.inds) + T2.modify(data=T2C.data, inds=T2C.inds) + + if s and info is not None: + info['singular_values'], = s + + +def tensor_balance_bond(t1, t2, smudge=1e-6): + ix, = t1.bonds(t2) + t1H = t1.H.reindex_({ix: ix+'*'}) + t2H = t2.H.reindex_({ix: ix+'*'}) + out1 = _core_contract(t1H, t1) + out2 = _core_contract(t2H, t2) + s1, s2 = get_smudge_balance(out1, out2, ix, smudge) + t1.multiply_index_diagonal_(ix, s1, location="back") + t2.multiply_index_diagonal_(ix, s2, location="front") + +# --------------------------------------------------------------------------- # +# Tensor Class # +# --------------------------------------------------------------------------- # + +class BlockTensor(Tensor): + + __slots__ = ('_data', '_inds', '_tags', '_left_inds', '_owners') + + def _apply_function(self, fn): + self._data = apply(self.data, fn) + + def expand_ind(self, ind, size): + raise NotImplementedError + + def new_ind(self, name, size=1, axis=0): + raise NotImplementedError + + @property + def shape(self): + """Return the "inflated" shape composed of maximal size for each leg + """ + return self.data.shape + + def astype(self, dtype, inplace=False): + raise NotImplementedError + + def ind_size(self, ind): + ax = self.inds.index(ind) + return self.get_bond_info(ax) + + def conj(self, inplace=False): + """Conjugate this tensors data (does nothing to indices). 
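        For example (a sketch, with ``t`` any ``BlockTensor``)::

            tc = t.conj()   # same indices, block data conjugated
            th = t.H        # conjugate transpose: data.dagger, indices reversed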
+ """ + t = self if inplace else self.copy() + t.modify(data=t.data.conj()) + return t + + conj_ = functools.partialmethod(conj, inplace=True) + + @property + def H(self): + t = self.copy() + t.modify(data=t.data.dagger, inds=t.inds[::-1]) + return t + + def transpose(self, *output_inds, inplace=False): + t = self if inplace else self.copy() + + output_inds = tuple(output_inds) # need to re-use this. + + if set(t.inds) != set(output_inds): + raise ValueError("'output_inds' must be permutation of the current" + f" tensor indices, but {set(t.inds)} != " + f"{set(output_inds)}") + + current_ind_map = {ind: i for i, ind in enumerate(t.inds)} + out_shape = tuple(current_ind_map[i] for i in output_inds) + + t.modify(data=np.transpose(t.data, out_shape), inds=output_inds) + return t + + transpose_ = functools.partialmethod(transpose, inplace=True) + + def trace(self, ind1, ind2, inplace=False): + raise NotImplementedError + + def sum_reduce(self, ind, inplace=False): + raise NotImplementedError + + def collapse_repeated(self, inplace=False): + raise NotImplementedError + + def contract(self, *others, output_inds=None, **opts): + return tensor_contract(self, *others, output_inds=output_inds, **opts) + + def direct_product(self, other, sum_inds=(), inplace=False): + raise NotImplementedError + + def split(self, *args, **kwargs): + return tensor_split(self, *args, **kwargs) + + def distance(self, other, **contract_opts): + raise NotImplementedError + + def entropy(self, left_inds, method='svd'): + raise NotImplementedError + + def fuse(self, fuse_map, inplace=False): + raise NotImplementedError + + def unfuse(self, unfuse_map, shape_map, inplace=False): + raise NotImplementedError + + def to_dense(self, *inds_seq, to_qarray=True): + raise NotImplementedError + + def squeeze(self, include=None, inplace=False): + raise NotImplementedError + + def norm(self): + """Frobenius norm of this tensor. + """ + return self.data.norm() + + def symmetrize(self, ind1, ind2, inplace=False): + raise NotImplementedError + + def unitize(self, left_inds=None, inplace=False, method='qr'): + raise NotImplementedError + + def randomize(self, dtype=None, inplace=False, **randn_opts): + raise NotImplementedError + + def flip(self, ind, inplace=False): + raise NotImplementedError + + def multiply_index_diagonal(self, ind, x, inplace=False, location="front"): + if location not in ["front", "back"]: + raise ValueError("invalid for the location of the diagonal") + t = self if inplace else self.copy() + ax = t.inds.index(ind) + if isinstance(x, Tensor): + x = x.data + if location=="front": + out = np.tensordot(x, t.data, axes=((1,), (ax,))) + transpose_order = list(range(1, ax+1)) + [0] + list(range(ax+1, t.ndim)) + else: + out = np.tensordot(t.data, x, axes=((ax,),(0,))) + transpose_order = list(range(ax)) + [t.ndim-1] + list(range(ax, t.ndim-1)) + data = np.transpose(out, transpose_order) + t.modify(data=data) + return t + + multiply_index_diagonal_ = functools.partialmethod( + multiply_index_diagonal, inplace=True) + + def almost_equals(self, other, **kwargs): + raise NotImplementedError + + def __and__(self, other): + """Combine with another ``Tensor`` or ``TensorNetwork`` into a new + ``TensorNetwork``. + """ + return BlockTensorNetwork((self, other)) + + def __or__(self, other): + """Combine virtually (no copies made) with another ``Tensor`` or + ``TensorNetwork`` into a new ``TensorNetwork``. 
+ """ + return BlockTensorNetwork((self, other), virtual=True) + + + def draw(self, *args, **kwargs): + """Plot a graph of this tensor and its indices. + """ + draw_tn(BlockTensorNetwork((self,)), *args, **kwargs) + + graph = draw + +# --------------------------------------------------------------------------- # +# Tensor Network Class # +# --------------------------------------------------------------------------- # + +class BlockTensorNetwork(TensorNetwork): + + __slots__ = ('_inner_inds', '_outer_inds', '_tid_counter') + _EXTRA_PROPS = () + _CONTRACT_STRUCTURED = False + + def replace_with_identity(self, where, which='any', inplace=False): + raise NotImplementedError + + def replace_with_svd(self, where, left_inds, eps, *, which='any', + right_inds=None, method='isvd', max_bond=None, + absorb='both', cutoff_mode='rel', renorm=None, + ltags=None, rtags=None, keep_tags=True, + start=None, stop=None, inplace=False): + raise NotImplementedError + + def replace_section_with_svd(self, start, stop, eps, + **replace_with_svd_opts): + raise NotImplementedError + + def convert_to_zero(self): + raise NotImplementedError + + def contract_between(self, tags1, tags2, **contract_opts): + tid1, = self._get_tids_from_tags(tags1, which='all') + tid2, = self._get_tids_from_tags(tags2, which='all') + + # allow no-op for same tensor specified twice ('already contracted') + if tid1 == tid2: + return + + T1 = self._pop_tensor(tid1) + T2 = self._pop_tensor(tid2) + T12 = tensor_contract(T1, T2, **contract_opts) + self.add_tensor(T12, tid=tid2, virtual=True) + + def contract_ind(self, ind, **contract_opts): + """Contract tensors connected by ``ind``. + """ + tids = self._get_tids_from_inds(ind) + ts = [self._pop_tensor(tid) for tid in tids] + self |= tensor_contract(*ts, **contract_opts) + + def _compress_between_tids( + self, + tid1, + tid2, + canonize_distance=None, + canonize_opts=None, + equalize_norms=False, + **compress_opts + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + + if canonize_distance: + raise NotImplementedError + + tensor_compress_bond(Tl, Tr, **compress_opts) + + if equalize_norms: + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) + + def _canonize_between_tids( + self, + tid1, + tid2, + equalize_norms=False, + **canonize_opts, + ): + Tl = self.tensor_map[tid1] + Tr = self.tensor_map[tid2] + tensor_canonize_bond(Tl, Tr, **canonize_opts) + + if equalize_norms: + self.strip_exponent(tid1, equalize_norms) + self.strip_exponent(tid2, equalize_norms) + + def new_bond(self, tags1, tags2, **opts): + raise NotImplementedError + + def insert_gauge(self, U, where1, where2, Uinv=None, tol=1e-10): + raise NotImplementedError + + # ----------------------- contracting the network ----------------------- # + def contract_tags(self, tags, inplace=False, which='any', **opts): + """Contract the tensors that match any or all of ``tags``. + + Parameters + ---------- + tags : sequence of str + The list of tags to filter the tensors by. Use ``...`` + (``Ellipsis``) to contract all. + inplace : bool, optional + Whether to perform the contraction inplace. + which : {'all', 'any'} + Whether to require matching all or any of the tags. + + Returns + ------- + TensorNetwork, Tensor or scalar + The result of the contraction, still a ``TensorNetwork`` if the + contraction was only partial. 
+ + See Also + -------- + contract, contract_cumulative, contract_structured + """ + untagged_tn, tagged_ts = self.partition_tensors( + tags, inplace=inplace, which=which) + + if not tagged_ts: + raise ValueError("No tags were found - nothing to contract. " + "(Change this to a no-op maybe?)") + + contracted = tensor_contract(*tagged_ts, **opts) + + if untagged_tn is None: + return contracted + + untagged_tn.add_tensor(contracted, virtual=True) + return untagged_tn + + def contract(self, tags=..., inplace=False, **opts): + if tags is all: + return tensor_contract(*self, **opts) + + # this checks whether certain TN classes have a manually specified + # contraction pattern (e.g. 1D along the line) + if self._CONTRACT_STRUCTURED: + if (tags is ...) or isinstance(tags, slice): + return self.contract_structured(tags, inplace=inplace, **opts) + + # else just contract those tensors specified by tags. + return self.contract_tags(tags, inplace=inplace, **opts) + + contract_ = functools.partialmethod(contract, inplace=True) + + def __matmul__(self, other): + """Overload "@" to mean full contraction with another network. + """ + return BlockTensorNetwork((self, other)) ^ ... + + def aslinearoperator(self, left_inds, right_inds, ldims=None, rdims=None, + backend=None, optimize='auto'): + raise NotImplementedError + + def to_dense(self, *inds_seq, to_qarray=True, **contract_opts): + raise NotImplementedError + + def distance(self, *args, **kwargs): + raise NotImplementedError + + def fit( + self, + tn_target, + method='als', + tol=1e-9, + inplace=False, + progbar=False, + **fitting_opts + ): + raise NotImplementedError + + # --------------- information about indices and dimensions -------------- # + + def squeeze(self, fuse=False, inplace=False): + raise NotImplementedError + + def unitize(self, mode='error', inplace=False, method='qr'): + raise NotImplementedError + + def balance_bonds(self, inplace=False): + tn = self if inplace else self.copy() + + for ix, tids in tn.ind_map.items(): + if len(tids) != 2: + continue + tid1, tid2 = tids + t1, t2 = [tn.tensor_map[x] for x in (tid1, tid2)] + tensor_balance_bond(t1, t2) + + return tn + + balance_bonds_ = functools.partialmethod(balance_bonds, inplace=True) + + def fuse_multibonds(self, inplace=False): + raise NotImplementedError + + def rank_simplify( + self, + output_inds=None, + equalize_norms=False, + cache=None, + inplace=False, + ): + raise NotImplementedError + + def diagonal_reduce( + self, + output_inds=None, + atol=1e-12, + cache=None, + inplace=False, + ): + raise NotImplementedError diff --git a/quimb/tensor/test/test_block_numerics.py b/quimb/tensor/test/test_block_numerics.py new file mode 100644 index 00000000..b4e75aad --- /dev/null +++ b/quimb/tensor/test/test_block_numerics.py @@ -0,0 +1,210 @@ +import pytest +import numpy as np +from quimb.tensor.tensor_block import ( + BlockTensor, BlockTensorNetwork, tensor_contract) +from quimb.tensor.block_interface import BondInfo, U11, U1, Z2, Z4, Z22, set +from pyblock3.algebra.fermion import SparseFermionTensor + +set(fermion=False) +rand = SparseFermionTensor.random + +@pytest.fixture(scope='class') +def u11setup(request): + bond1 = BondInfo({U11(0):3, U11(1,1): 3, U11(1,-1):3, U11(2):3}) + bond2 = BondInfo({U11(0):5, U11(1,1): 5, U11(1,-1):5, U11(2):5}) + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U11(1,1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U11(-1,-1), pattern="++-").to_flat() + + request.cls.ega = ega = rand((bond1, bond1, bond2), 
dq=U11(1,-1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U11(-1,1), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=U11(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=U11(1,-1), pattern="++").to_flat() + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z22setup(request): + bond1 = BondInfo({Z22(0):3, Z22(0,1): 3, Z22(1,0):3, Z22(1,1):3}) + bond2 = BondInfo({Z22(0):5, Z22(0,1): 5, Z22(1,0):5, Z22(1,1):5}) + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z22(0,1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z22(1,0), pattern="++-").to_flat() + + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z22(1,0), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z22(0,1), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=Z22(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=Z22(1,0), pattern="++").to_flat() + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def u1setup(request): + bond1 = BondInfo({U1(0):3, U1(1): 3, U1(3):3, U1(2):3}) + bond2 = BondInfo({U1(0):5, U1(1): 5, U1(3):5, U1(2):5}) + + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U1(1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U1(2), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U1(-1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U1(-2), pattern="+-+").to_flat() + + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=U1(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=U1(1), pattern="++").to_flat() + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = 
BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z4setup(request): + bond1 = BondInfo({Z4(0):3, Z4(1): 3, Z4(3):3, Z4(2):3}) + bond2 = BondInfo({Z4(0):5, Z4(1): 5, Z4(3):5, Z4(2):5}) + + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z4(1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z4(2), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z4(0), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z4(1), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=Z4(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=Z4(1), pattern="++").to_flat() + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z2setup(request): + bond1 = BondInfo({Z2(0):3, Z2(1): 3}) + bond2 = BondInfo({Z2(0):5, Z2(1): 5}) + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z2(0), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z2(1), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z2(1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z2(0), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=Z2(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=Z2(1), pattern="++").to_flat() + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = BlockTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = BlockTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.mark.usefixtures('u11setup') +class TestU11: + def test_backend(self): + Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + assert err < 1e-10 + + def test_contract_between(self): + tn1 = self.tn.copy() + tn1.contract_between("abc", "ega") + Tegbc = tn1["abc"].transpose("e","g","b","c") + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + assert err < 1e-10 + + def test_contract_all(self): + result = self.tn.contract(all) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + deg1 = np.tensordot(self.bcd, egbc, axes=[(0,1),(2,3)]) + ref_val = np.tensordot(self.deg, deg1, axes=[(0,1,2),]*2) + 
err = abs(result - ref_val) + assert err < 1e-10 + + def test_contract_ind(self): + tn1 = self.tn.copy() + tn1.contract_ind("d") + out = tn1["deg"].transpose("e","g","b","c") + egbc = np.tensordot(self.deg, self.bcd, axes=[(0,),(2,)]) + err = (egbc - out.data).norm() + assert err < 1e-10 + + def test_balance_bonds(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.balance_bonds() + exact_bb = norm1.contract(all, optimize="auto-hq") + assert exact_bb == pytest.approx(exact, rel=1e-2) + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert (tsr1-tsr).data.norm() >1e-10 + + def test_equlaize_norm(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.equalize_norms() + exact_en = norm1.contract(all, optimize="auto-hq") + assert exact_en == pytest.approx(exact, rel=1e-2) + ref1 = list(norm1.tensor_map.values())[0].norm() + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert tsr1.norm() == pytest.approx(ref1, rel=1e-2) + + def test_split(self): + Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) + u, s, v = Tegbc.split(("e","b"), method="svd", absorb=None) + out = tensor_contract(u,s,v, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + + for absorb in ["left", "right"]: + for method in ["qr", "svd"]: + l, r = Tegbc.split(("g","c"), method=method, absorb=absorb) + out = tensor_contract(l, r, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + +@pytest.mark.usefixtures('u1setup') +class TestU1(TestU11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestZ4(TestU11): + pass + +@pytest.mark.usefixtures('z22setup') +class TestZ22(TestU11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestZ2(TestU11): + pass diff --git a/quimb/tensor/test/test_fermion_2d.py b/quimb/tensor/test/test_fermion_2d.py new file mode 100644 index 00000000..5a08a450 --- /dev/null +++ b/quimb/tensor/test/test_fermion_2d.py @@ -0,0 +1,247 @@ +import pytest +import numpy as np +import itertools +from quimb.tensor.block_interface import BondInfo, SparseFermionTensor, U11, U1, Z4, Z2, Z22 +from quimb.tensor.fermion_gen import gen_mf_peps + +@pytest.fixture(scope='class') +def u11setup(request): + bond = BondInfo({U11(0):1, U11(2): 1, U11(1,-1):1, U11(1,1):1}) + G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() + Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry="U11") + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def z22setup(request): + bond = BondInfo({Z22(0):1, Z22(0,1): 1, Z22(1,0):1, Z22(1,1):1}) + G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() + Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry="Z22") + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def 
u1setup(request): + bond = BondInfo({U1(0):1, U1(1): 2, U1(2):1}) + G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() + Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry="U1") + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def z4setup(request): + bond = BondInfo({Z4(0):2, Z4(1): 2}) + G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() + Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry="z4") + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.fixture(scope='class') +def z2setup(request): + bond = BondInfo({Z2(0):2, Z2(1): 2}) + G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() + Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + request.cls.G = G + request.cls.Hij = Hij + request.cls.Lx = Lx = 3 + request.cls.Ly = Ly = 3 + request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) + request.cls.peps = gen_mf_peps(state_array, symmetry="z2") + for itsr in request.cls.peps.tensor_map.values(): + itsr.data.data *= np.random.random(itsr.data.data.size) * 5 + +@pytest.mark.usefixtures('u11setup') +class TestPEPS_U11: + @pytest.mark.parametrize('where', [ + (0, 0), (0, 1), (0, 2), (2, 0), + (1, 0), (1, 1), (1, 2), (2, 1) + ]) + @pytest.mark.parametrize('contract', [False, True]) + def test_gate_2d_single_site(self, where, contract): + G = self.G + Lx = 3 + Ly = 3 + psi = self.peps + xe = psi.compute_local_expectation({where: G}) + tn = psi.H & psi.gate(G, where, contract=contract) + assert len(tn.tensors) == 2 * Lx * Ly + int(not contract) + assert tn ^ all == pytest.approx(xe) + + @pytest.mark.parametrize( + 'contract', [False, True, 'split', 'reduce-split']) + @pytest.mark.parametrize('where', [ + [(1, 1), (2, 1)], [(2, 1), (2, 2)] + ]) + def test_gate_2d_two_site(self, where, contract): + Hij = self.Hij + psi = self.peps + xe = psi.compute_local_expectation({tuple(where): Hij}) + tn = psi.H & psi.gate(Hij, tuple(where), contract=contract) + change = {False: 1, True: -1, 'split': 0, 'reduce-split': 0}[contract] + assert len(tn.tensors) == 2 * self.Lx * self.Ly + change + assert tn ^ all == pytest.approx(xe) + + def test_contract_2d_one_layer_boundary(self): + psi = self.peps + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=6) + assert xt == pytest.approx(xe, rel=1e-2) + + def test_contract_2d_two_layer_boundary(self): + psi = self.peps + norm = psi.make_norm() + xe = norm.contract(all, optimize='auto-hq') + xt = norm.contract_boundary(max_bond=6, layer_tags=['KET', 'BRA']) + assert xt == pytest.approx(xe, rel=1e-2) + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_row_envs(self, two_layer): + psi = self.peps + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, 
+ 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_row_environments(**compress_opts) + + for i in range(norm.Lx): + norm_i = ( + row_envs['bottom', i] & + row_envs['mid', i] & + row_envs['top', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + @pytest.mark.parametrize("two_layer", [False, True]) + def test_compute_col_envs(self, two_layer): + psi = self.peps + norm = psi.make_norm() + ex = norm.contract(all) + if two_layer: + compress_opts = {'cutoff': 1e-6, 'max_bond': 12, + 'layer_tags': ['KET', 'BRA']} + else: + compress_opts = {'cutoff': 1e-6, 'max_bond': 8} + row_envs = norm.compute_col_environments(**compress_opts) + + for i in range(norm.Ly): + norm_i = ( + row_envs['left', i] & + row_envs['mid', i] & + row_envs['right', i] + ) + x = norm_i.contract(all) + assert x == pytest.approx(ex, rel=1e-2) + + def test_normalize(self): + psi = self.peps + norm = psi.make_norm().contract(all) + assert norm != pytest.approx(1.0) + psi.normalize_(balance_bonds=True, equalize_norms=True, cutoff=2e-3) + norm = psi.make_norm().contract(all) + assert norm == pytest.approx(1.0, rel=1e-2) + + def test_compute_local_expectation_one_sites(self): + peps = self.peps + coos = list(itertools.product(range(self.Lx), range(self.Ly))) + terms = {coo: self.G for coo in coos} + + expecs = peps.compute_local_expectation( + terms, + normalized=True, + return_all=True) + + norm = peps.compute_norm() + for where, G in terms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where) + tn = ket & bra + out = tn.contract_boundary(max_bond=12) + assert out == pytest.approx(expecs[where][0], rel=1e-2) + assert norm == pytest.approx(expecs[where][1], rel=1e-2) + + def test_compute_local_expectation_two_sites(self): + normalized=True + peps = self.peps + Hij = self.Hij + hterms = {coos: Hij for coos in peps.gen_horizontal_bond_coos()} + vterms = {coos: Hij for coos in peps.gen_vertical_bond_coos()} + + opts = dict(cutoff=2e-3, max_bond=12, contract_optimize='random-greedy') + norm = peps.compute_norm(max_bond=12, cutoff=2e-3) + he = peps.compute_local_expectation( + hterms, normalized=normalized, return_all=True, **opts) + ve = peps.compute_local_expectation( + vterms, normalized=normalized, return_all=True, **opts) + + for where, G in hterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="reduce-split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(he[where][0], rel=1e-2) + assert norm == pytest.approx(he[where][1], rel=1e-2) + + for where, G in vterms.items(): + ket = peps.copy() + ket.add_tag("KET") + bra = ket.H + bra.retag({"KET": "BRA"}) + bra.mangle_inner_("*") + ket.gate_(G, where, contract="split") + tn = ket & bra + out = tn.contract_boundary(max_bond=12, cutoff=2e-3) + assert out == pytest.approx(ve[where][0], rel=1e-2) + assert norm == pytest.approx(ve[where][1], rel=1e-2) + +@pytest.mark.usefixtures('u1setup') +class TestPEPS_U1(TestPEPS_U11): + pass + +@pytest.mark.usefixtures('z22setup') +class TestPEPS_Z22(TestPEPS_U11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestPEPS_Z4(TestPEPS_U11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestPEPS_Z2(TestPEPS_U11): + pass diff --git a/quimb/tensor/test/test_fermion_numerics.py 
b/quimb/tensor/test/test_fermion_numerics.py new file mode 100644 index 00000000..9d9e5b78 --- /dev/null +++ b/quimb/tensor/test/test_fermion_numerics.py @@ -0,0 +1,210 @@ +import pytest +import numpy as np +from quimb.tensor.fermion import ( + FermionTensor, FermionTensorNetwork, tensor_contract) +from quimb.tensor.block_interface import BondInfo, U11, U1, Z2, Z4, Z22, set +from pyblock3.algebra.fermion import SparseFermionTensor + +set(fermion=True) +rand = SparseFermionTensor.random + +@pytest.fixture(scope='class') +def u11setup(request): + bond1 = BondInfo({U11(0):3, U11(1,1): 3, U11(1,-1):3, U11(2):3}) + bond2 = BondInfo({U11(0):5, U11(1,1): 5, U11(1,-1):5, U11(2):5}) + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U11(1,1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U11(-1,-1), pattern="++-").to_flat() + + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U11(1,-1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U11(-1,1), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=U11(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=U11(1,-1), pattern="++").to_flat() + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z22setup(request): + bond1 = BondInfo({Z22(0):3, Z22(0,1): 3, Z22(1,0):3, Z22(1,1):3}) + bond2 = BondInfo({Z22(0):5, Z22(0,1): 5, Z22(1,0):5, Z22(1,1):5}) + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z22(0,1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z22(1,0), pattern="++-").to_flat() + + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z22(1,0), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z22(0,1), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=Z22(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=Z22(1,0), pattern="++").to_flat() + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def u1setup(request): + bond1 = BondInfo({U1(0):3, U1(1): 3, U1(3):3, U1(2):3}) + bond2 = BondInfo({U1(0):5, U1(1): 5, U1(3):5, U1(2):5}) + + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U1(1), pattern="+--").to_flat() 
+ request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U1(2), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U1(-1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U1(-2), pattern="+-+").to_flat() + + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=U1(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=U1(1), pattern="++").to_flat() + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z4setup(request): + bond1 = BondInfo({Z4(0):3, Z4(1): 3, Z4(3):3, Z4(2):3}) + bond2 = BondInfo({Z4(0):5, Z4(1): 5, Z4(3):5, Z4(2):5}) + + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z4(1), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z4(2), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z4(0), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z4(1), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=Z4(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=Z4(1), pattern="++").to_flat() + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.fixture(scope='class') +def z2setup(request): + bond1 = BondInfo({Z2(0):3, Z2(1): 3}) + bond2 = BondInfo({Z2(0):5, Z2(1): 5}) + request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z2(0), pattern="+--").to_flat() + request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z2(1), pattern="++-").to_flat() + request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z2(1), pattern="+--").to_flat() + request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z2(0), pattern="+-+").to_flat() + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) + request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) + request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) + request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) + request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) + + ab = rand((bond1, bond1), dq=Z2(0), pattern="+-").to_flat() + bc = rand((bond1, bond1), dq=Z2(1), pattern="++").to_flat() + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) + Tbc = FermionTensor(bc, 
inds=['b','c'], tags=["bc"]) + Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) + Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) + request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) + yield + +@pytest.mark.usefixtures('u11setup') +class TestU11: + def test_backend(self): + Tegbc = tensor_contract(self.Tega, self.Tabc, output_inds=("e","g","b", "c")) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + assert err < 1e-10 + + def test_contract_between(self): + tn1 = self.tn.copy() + tn1.contract_between("abc", "ega") + Tegbc = tn1["abc"].transpose("e","g","b","c") + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + err = (egbc - Tegbc.data).norm() + assert err < 1e-10 + + def test_contract_all(self): + result = self.tn.contract(all) + egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) + deg1 = np.tensordot(self.bcd, egbc, axes=[(0,1),(2,3)]) + ref_val = np.tensordot(self.deg, deg1, axes=[(0,1,2),]*2) + err = abs(result - ref_val) + assert err < 1e-10 + + def test_contract_ind(self): + tn1 = self.tn.copy() + tn1.contract_ind("d") + out = tn1["deg"].transpose("e","g","b","c") + egbc = np.tensordot(self.deg, self.bcd, axes=[(0,),(2,)]) + err = (egbc - out.data).norm() + assert err < 1e-10 + + def test_balance_bonds(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.balance_bonds() + exact_bb = norm1.contract(all, optimize="auto-hq") + assert exact_bb == pytest.approx(exact, rel=1e-2) + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert (tsr1-tsr).data.norm() >1e-10 + + def test_equlaize_norm(self): + norm = self.norm + exact = norm.contract(all, optimize="auto-hq") + norm1 = norm.equalize_norms() + exact_en = norm1.contract(all, optimize="auto-hq") + assert exact_en == pytest.approx(exact, rel=1e-2) + ref1 = list(norm1.tensor_map.values())[0].norm() + for tid, tsr in norm.tensor_map.items(): + tsr1 = norm1.tensor_map[tid] + assert tsr1.norm() == pytest.approx(ref1, rel=1e-2) + + def test_split(self): + Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) + u, s, v = Tegbc.split(("e","b"), method="svd", absorb=None, get="tensors") + out = tensor_contract(u,s,v, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + + for absorb in ["left", "right"]: + for method in ["qr", "svd"]: + l, r = Tegbc.split(("g","c"), method=method, absorb=absorb, get="tensors") + out = tensor_contract(l, r, output_inds=Tegbc.inds) + assert((out.data-Tegbc.data).norm()<1e-10) + +@pytest.mark.usefixtures('u1setup') +class TestU1(TestU11): + pass + +@pytest.mark.usefixtures('z4setup') +class TestZ4(TestU11): + pass + +@pytest.mark.usefixtures('z22setup') +class TestZ22(TestU11): + pass + +@pytest.mark.usefixtures('z2setup') +class TestZ2(TestU11): + pass From 25cfc3675e27f47b2e63e9bb3f5842e7fae48037 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 28 Apr 2021 15:44:14 -0700 Subject: [PATCH 45/61] clean up tensor_core module --- quimb/tensor/tensor_core.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/quimb/tensor/tensor_core.py b/quimb/tensor/tensor_core.py index f1bf16a2..c8b61b78 100644 --- a/quimb/tensor/tensor_core.py +++ b/quimb/tensor/tensor_core.py @@ -1644,7 +1644,7 @@ def copy(self, deep=False, virtual=False): tensor network, this simply returns ``self``. 
""" if not (deep or virtual): - return Tensor(self, None) + return self.__class__(self, None) if deep and virtual: raise ValueError("Copy can't be both deep and virtual.") @@ -5093,17 +5093,7 @@ def _do_contraction(tid1, tid2): t1, t2 = self._pop_tensor(tid1), self._pop_tensor(tid2) # contract them -<<<<<<< HEAD - t_new = t1 @ t2 - - if not isinstance(t_new, t1.__class__): - t_new = t1.__class__(t_new, tags=t1.tags | t2.tags) - - if info is not None: - largest_intermediate = max(largest_intermediate, t_new.size) -======= t_new = tensor_contract(t1, t2, preserve_tensor=True) ->>>>>>> upstream/develop # re-add the product, using the same identifier as the (inner) t2 tid_new = tid2 From 1b0be8edf4f241140fb82d005093de08074ee858 Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 28 Apr 2021 16:20:07 -0700 Subject: [PATCH 46/61] fix bug when there is only one operand in tensor_contract --- quimb/tensor/fermion.py | 5 +++++ quimb/tensor/tensor_block.py | 2 ++ 2 files changed, 7 insertions(+) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 6916d5fa..1ca25c6d 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -353,6 +353,11 @@ def _contract_pairs(self, tid1, tid2, output_inds=None): # --------------------------------------------------------------------------- # def tensor_contract(*tensors, output_inds=None, inplace=False, **contract_opts): + if len(tensors) == 1: + if inplace: + return tensors[0] + else: + return tensors[0].copy() path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) fs, tid_lst = _fetch_fermion_space(*tensors, inplace=inplace) if inplace: diff --git a/quimb/tensor/tensor_block.py b/quimb/tensor/tensor_block.py index 8b38b83a..d767de43 100644 --- a/quimb/tensor/tensor_block.py +++ b/quimb/tensor/tensor_block.py @@ -31,6 +31,8 @@ def _core_contract(T1, T2): return T1.__class__(data=o_array, inds=o_ix, tags=o_tags) def tensor_contract(*tensors, output_inds=None, **contract_opts): + if len(tensors) == 1: + return tensors[0] path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) tensors = list(tensors) for conc in path_info.contraction_list: From 20bac5c30f68cbc45d3316e097c1fd96c3c15463 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 30 Apr 2021 15:27:54 -0700 Subject: [PATCH 47/61] bugfix in canonize --- quimb/tensor/fermion_2d.py | 4 +++- quimb/tensor/tensor_block.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 5fefdca6..65aafd61 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -155,6 +155,8 @@ def compute_environments( envs[from_which, sweep[0]] = FermionTensorNetwork([]) first_row = row_tag(sweep[0]) envs['mid', sweep[0]] = tn.select(first_row).copy() + if len(sweep)==1: + return envs if dense: tn ^= first_row envs[from_which, sweep[1]] = tn.select(first_row).copy() @@ -719,8 +721,8 @@ def gate( # pop the sites, contract, then re-add pts = [psi._pop_tensor(tid) for tid in site_tids] out = tensor_contract(*pts, TG, inplace=True) - psi.fermion_space.move(out.get_fermion_info()[0], min(isite)) psi |= out + psi.fermion_space.move(out.get_fermion_info()[0], min(isite)) return psi # following are all based on splitting tensors to maintain structure diff --git a/quimb/tensor/tensor_block.py b/quimb/tensor/tensor_block.py index d767de43..194020d9 100644 --- a/quimb/tensor/tensor_block.py +++ b/quimb/tensor/tensor_block.py @@ -148,7 +148,7 @@ def tensor_canonize_bond(T1, T2, absorb='right', 
**split_opts): if absorb == "right": new_T1, tRfact = T1.split(left_env_ix, get='tensors', absorb=absorb, **split_opts) - new_T2 = T2.contract(tRfact) + new_T2 = tRfact.contract(T2) else: tLfact, new_T2 = T2.split(shared_ix, get="tensors", absorb=absorb, **split_opts) new_T1 = T1.contract(tLfact) @@ -241,7 +241,7 @@ def astype(self, dtype, inplace=False): def ind_size(self, ind): ax = self.inds.index(ind) - return self.get_bond_info(ax) + return self.data.get_bond_info(ax) def conj(self, inplace=False): """Conjugate this tensors data (does nothing to indices). From 2ac25d49138fb18e18f500ef3f1dfbbb966d0cfe Mon Sep 17 00:00:00 2001 From: yangcal Date: Mon, 3 May 2021 16:11:53 -0700 Subject: [PATCH 48/61] split gate index fix --- quimb/tensor/fermion_2d.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 65aafd61..3ae81c5e 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -483,9 +483,11 @@ def gate_string_split_(TG, where, string, original_ts, bonds_along, t = inner_ts[i] t.multiply_index_diagonal_(bix, snew, location=location) + revert_index_map = {v: k for k, v in reindex_map.items()} for to, tn in zip(original_ts, inner_ts): + to.reindex_(revert_index_map) tn.transpose_like_(to) - to.modify(data=tn.data, inds=tn.inds) + to.modify(data=tn.data) for i, (tid, _) in enumerate(fermion_info): if i==0: From 7d4b531440f448ab1e97faaad7e50b65449724ec Mon Sep 17 00:00:00 2001 From: yangcal Date: Wed, 5 May 2021 11:21:03 -0700 Subject: [PATCH 49/61] fix bug for python backend --- quimb/tensor/block_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quimb/tensor/block_tools.py b/quimb/tensor/block_tools.py index 735589ad..53962043 100644 --- a/quimb/tensor/block_tools.py +++ b/quimb/tensor/block_tools.py @@ -18,7 +18,7 @@ def sqrt(T): def inv_with_smudge(T, cutoff=1e-10, gauge_smudge=1e-6): def _inv_with_smudge(arr): - new_arr = np.zeros_like(arr) + new_arr = np.zeros(arr.shape, dtype=arr.dtype) ind = abs(arr) > cutoff new_arr[ind] = (arr[ind] + gauge_smudge) ** -1 return new_arr From 160f579f7e00bfb0c99934c862e93789119f2029 Mon Sep 17 00:00:00 2001 From: yangcal Date: Thu, 6 May 2021 12:29:45 -0700 Subject: [PATCH 50/61] doc added --- quimb/tensor/fermion_2d.py | 60 ++++++++++++++--- quimb/tensor/fermion_2d_tebd.py | 112 +++++++++++++++++++++++++++----- 2 files changed, 148 insertions(+), 24 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 3ae81c5e..c4daac12 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -1,20 +1,17 @@ -"""Classes and algorithms related to 2D tensor networks. +"""Classes and algorithms related to fermionic 2D tensor networks.
""" import re import functools from operator import add -from itertools import product, cycle, starmap +from itertools import product from collections import defaultdict -import opt_einsum as oe - from ..utils import check_opt, pairwise from .tensor_core import ( bonds, rand_uuid, oset, - tags_to_oset, - oset_union, + tags_to_oset ) from .tensor_2d import ( Rotator2D, @@ -27,13 +24,23 @@ is_lone_coo, gen_long_range_path, calc_plaquette_sizes, - calc_plaquette_map) -from .tensor_block import BlockTensorNetwork -from .fermion import FermionTensor, FermionTensorNetwork, tensor_contract + calc_plaquette_map +) +from .fermion import ( + FermionTensor, + FermionTensorNetwork, + tensor_contract +) INVERSE_CUTOFF = 1e-10 class FermionTensorNetwork2D(FermionTensorNetwork, TensorNetwork2D): + """A subclass of ``quimb.tensor.tensor_2d.TensorNetwork2D`` that overrides the methods + that depend on the ordering of the tensors. A ``reorder`` method is added to aid row/column-wise + operations. Environments are now computed as an entire FermionTensorNetwork so that the + plaquettes are placed correctly. + + """ _EXTRA_PROPS = ( '_site_tag_id', '_row_tag_id', @@ -68,6 +75,37 @@ def flatten(self, fuse_multibonds=True, inplace=False): raise NotImplementedError def reorder(self, direction, layer_tags=None, inplace=False): + r"""Reorder all tensors either row-wise or column-wise. + + If ``direction == 'row'`` then:: + + | | | | | | | + Row 0: ─●─>●─>●─>●─>●─>●─>●─ then Row 1 + | | | | | | | + Row 1: ─●─>●─>●─>●─>●─>●─>●─ then Row 2 + | | | | | | | + Row 2: ─●─>●─>●─>●─>●─>●─>●─ + | | | | | | | + + If ``direction == 'col'`` then:: + + v v v v v v v + ─●──●──●──●──●──●──●─ + v v v v v v v + ─●──●──●──●──●──●──●─ + v v v v v v v + ─●──●──●──●──●──●──●─ + v v v v v v v + + Parameters + ---------- + direction : {"row", "col"} + The direction in which to reorder the entire network + layer_tags : optional + The relative order of the layers within a single coordinate + inplace : bool, optional + Whether to perform the operation inplace + """ Lx, Ly = self._Lx, self._Ly tid_map = dict() current_position = 0 @@ -135,6 +173,10 @@ def compute_environments( envs=None, **contract_boundary_opts ): + """Compute the ``self.Lx`` 1D boundary tensor networks describing + the environments of rows and columns. The returned tensor network + also contains the original plaquettes. + """ direction = {"left": "col", "right": "col", "top": "row", diff --git a/quimb/tensor/fermion_2d_tebd.py b/quimb/tensor/fermion_2d_tebd.py index 6d4fa8a3..0e503bf8 100644 --- a/quimb/tensor/fermion_2d_tebd.py +++ b/quimb/tensor/fermion_2d_tebd.py @@ -26,6 +26,8 @@ def Hubbard2D(t, u, Lx, Ly, mu=0., symmetry=None): Size in y direction mu: scalar, optional Chemical potential + symmetry : {'z2', 'u1', 'z22', 'u11'}, optional + The symmetry to use in the backend Returns ------- @@ -58,7 +60,7 @@ class LocalHam2D: The number of rows. Ly : int The number of columns. - H2 : array_like or dict[tuple[tuple[int]], array_like] + H2 : pyblock3 tensors or dict[tuple[tuple[int]], pyblock3 tensors] The two site term(s). If a single array is given, assume to be the default interaction for all nearest neighbours. If a dict is supplied, the keys should represent specific pairs of coordinates like @@ -75,7 +77,7 @@ class LocalHam2D: Attributes ---------- - terms : dict[tuple[tuple[int]], array_like] + terms : dict[tuple[tuple[int]], pyblock3 tensors] The total effective local term for each interaction (with single site terms appropriately absorbed). Each key is a pair of coordinates ``ija, ijb`` with ``ija < ijb``.
@@ -217,16 +219,6 @@ def get_auto_ordering(self, order='sort', **kwargs): return ordering - def apply_to_arrays(self, fn): - """Apply the function ``fn`` to all the arrays representing terms. - """ - for k, x in self.terms.items(): - self.terms[k] = fn(x) - - def __repr__(self): - s = "" - return s.format(self.Lx, self.Ly, len(self.terms)) - def _get_location(Ti, Tj): if Ti.get_fermion_info()[1] Date: Thu, 6 May 2021 12:32:49 -0700 Subject: [PATCH 51/61] clean up old tests --- quimb/tensor/test_fermion/test_fermion_2d.py | 230 ------------------- quimb/tensor/test_fermion/test_numerics.py | 169 -------------- quimb/tensor/test_fermion/test_operators.py | 225 ------------------ 3 files changed, 624 deletions(-) delete mode 100644 quimb/tensor/test_fermion/test_fermion_2d.py delete mode 100644 quimb/tensor/test_fermion/test_numerics.py delete mode 100644 quimb/tensor/test_fermion/test_operators.py diff --git a/quimb/tensor/test_fermion/test_fermion_2d.py b/quimb/tensor/test_fermion/test_fermion_2d.py deleted file mode 100644 index 2eaddb36..00000000 --- a/quimb/tensor/test_fermion/test_fermion_2d.py +++ /dev/null @@ -1,230 +0,0 @@ -import pytest -import numpy as np -import itertools -from quimb.tensor.block_interface import BondInfo, SparseFermionTensor, U11, U1, Z4, Z2 -from quimb.tensor.fermion_gen import gen_mf_peps - -@pytest.fixture(scope='class') -def u11setup(request): - bond = BondInfo({U11(0):1, U11(2): 1, U11(1,-1):1, U11(1,1):1}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() - request.cls.G = G - request.cls.Hij = Hij - request.cls.Lx = Lx = 3 - request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="U11") - for itsr in request.cls.peps.tensor_map.values(): - itsr.data.data *= np.random.random(itsr.data.data.size) * 5 - -@pytest.fixture(scope='class') -def u1setup(request): - bond = BondInfo({U1(0):1, U1(1): 2, U1(2):1}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() - request.cls.G = G - request.cls.Hij = Hij - request.cls.Lx = Lx = 3 - request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="U1") - for itsr in request.cls.peps.tensor_map.values(): - itsr.data.data *= np.random.random(itsr.data.data.size) * 5 - -@pytest.fixture(scope='class') -def z4setup(request): - bond = BondInfo({Z4(0):2, Z4(1): 2}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() - request.cls.G = G - request.cls.Hij = Hij - request.cls.Lx = Lx = 3 - request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="z4") - for itsr in request.cls.peps.tensor_map.values(): - itsr.data.data *= np.random.random(itsr.data.data.size) * 5 - -@pytest.fixture(scope='class') -def z2setup(request): - bond = BondInfo({Z2(0):2, Z2(1): 2}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() - request.cls.G = G - request.cls.Hij = Hij - request.cls.Lx = Lx = 3 - request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = 
np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="z2") - for itsr in request.cls.peps.tensor_map.values(): - itsr.data.data *= np.random.random(itsr.data.data.size) * 5 - -@pytest.mark.usefixtures('u11setup') -class TestPEPS_U11: - @pytest.mark.parametrize('where', [ - (0, 0), (0, 1), (0, 2), (2, 0), - (1, 0), (1, 1), (1, 2), (2, 1) - ]) - @pytest.mark.parametrize('contract', [False, True]) - def test_gate_2d_single_site(self, where, contract): - bond = BondInfo({U11(0):1, U11(2): 1, U11(1,-1):1, U11(1,1):1}) - G = self.G - Lx = 3 - Ly = 3 - psi = self.peps - xe = psi.compute_local_expectation({where: G}) - tn = psi.H & psi.gate(G, where, contract=contract) - assert len(tn.tensors) == 2 * Lx * Ly + int(not contract) - assert tn ^ all == pytest.approx(xe) - - @pytest.mark.parametrize( - 'contract', [False, True, 'split', 'reduce-split']) - @pytest.mark.parametrize('where', [ - [(1, 1), (2, 1)], [(2, 1), (2, 2)] - ]) - def test_gate_2d_two_site(self, where, contract): - Hij = self.Hij - psi = self.peps - xe = psi.compute_local_expectation({tuple(where): Hij}) - tn = psi.H & psi.gate(Hij, tuple(where), contract=contract) - change = {False: 1, True: -1, 'split': 0, 'reduce-split': 0}[contract] - assert len(tn.tensors) == 2 * self.Lx * self.Ly + change - assert tn ^ all == pytest.approx(xe) - - def test_contract_2d_one_layer_boundary(self): - psi = self.peps - norm = psi.make_norm() - xe = norm.contract(all, optimize='auto-hq') - xt = norm.contract_boundary(max_bond=6) - assert xt == pytest.approx(xe, rel=1e-2) - - def test_contract_2d_two_layer_boundary(self): - psi = self.peps - norm = psi.make_norm() - xe = norm.contract(all, optimize='auto-hq') - xt = norm.contract_boundary(max_bond=6, layer_tags=['KET', 'BRA']) - assert xt == pytest.approx(xe, rel=1e-2) - - @pytest.mark.parametrize("two_layer", [False, True]) - def test_compute_row_envs(self, two_layer): - psi = self.peps - norm = psi.make_norm() - ex = norm.contract(all) - if two_layer: - compress_opts = {'cutoff': 1e-6, 'max_bond': 12, - 'layer_tags': ['KET', 'BRA']} - else: - compress_opts = {'cutoff': 1e-6, 'max_bond': 8} - row_envs = norm.compute_row_environments(**compress_opts) - - for i in range(norm.Lx): - norm_i = ( - row_envs['below', i] & - row_envs['mid', i] & - row_envs['above', i] - ) - x = norm_i.contract(all) - assert x == pytest.approx(ex, rel=1e-2) - - @pytest.mark.parametrize("two_layer", [False, True]) - def test_compute_col_envs(self, two_layer): - psi = self.peps - norm = psi.make_norm() - ex = norm.contract(all) - if two_layer: - compress_opts = {'cutoff': 1e-6, 'max_bond': 12, - 'layer_tags': ['KET', 'BRA']} - else: - compress_opts = {'cutoff': 1e-6, 'max_bond': 8} - row_envs = norm.compute_col_environments(**compress_opts) - - for i in range(norm.Ly): - norm_i = ( - row_envs['left', i] & - row_envs['mid', i] & - row_envs['right', i] - ) - x = norm_i.contract(all) - assert x == pytest.approx(ex, rel=1e-2) - - def test_normalize(self): - psi = self.peps - norm = psi.make_norm().contract(all) - assert norm != pytest.approx(1.0) - psi.normalize_(balance_bonds=True, equalize_norms=True, cutoff=2e-3) - norm = psi.make_norm().contract(all) - assert norm == pytest.approx(1.0, rel=1e-2) - - def test_compute_local_expectation_one_sites(self): - peps = self.peps - coos = list(itertools.product(range(self.Lx), range(self.Ly))) - terms = {coo: self.G for coo in coos} - - expecs = peps.compute_local_expectation( - terms, - normalized=True, - return_all=True) - - norm = 
peps.compute_norm() - for where, G in terms.items(): - ket = peps.copy() - ket.add_tag("KET") - bra = ket.H - bra.retag({"KET": "BRA"}) - bra.mangle_inner_("*") - ket.gate_(G, where) - tn = ket & bra - out = tn.contract_boundary(max_bond=12) - assert out == pytest.approx(expecs[where][0], rel=1e-2) - assert norm == pytest.approx(expecs[where][1], rel=1e-2) - - def test_compute_local_expectation_two_sites(self): - normalized=True - peps = self.peps - Hij = self.Hij - hterms = {coos: Hij for coos in peps.gen_horizontal_bond_coos()} - vterms = {coos: Hij for coos in peps.gen_vertical_bond_coos()} - - opts = dict(cutoff=2e-3, max_bond=12, contract_optimize='random-greedy') - norm = peps.compute_norm(max_bond=12, cutoff=2e-3) - he = peps.compute_local_expectation( - hterms, normalized=normalized, return_all=True, **opts) - ve = peps.compute_local_expectation( - vterms, normalized=normalized, return_all=True, **opts) - - for where, G in hterms.items(): - ket = peps.copy() - ket.add_tag("KET") - bra = ket.H - bra.retag({"KET": "BRA"}) - bra.mangle_inner_("*") - ket.gate_(G, where, contract="reduce-split") - tn = ket & bra - out = tn.contract_boundary(max_bond=12, cutoff=2e-3) - assert out == pytest.approx(he[where][0], rel=1e-2) - assert norm == pytest.approx(he[where][1], rel=1e-2) - - for where, G in vterms.items(): - ket = peps.copy() - ket.add_tag("KET") - bra = ket.H - bra.retag({"KET": "BRA"}) - bra.mangle_inner_("*") - ket.gate_(G, where, contract="split") - tn = ket & bra - out = tn.contract_boundary(max_bond=12, cutoff=2e-3) - assert out == pytest.approx(ve[where][0], rel=1e-2) - assert norm == pytest.approx(ve[where][1], rel=1e-2) - -@pytest.mark.usefixtures('u1setup') -class TestPEPS_U1(TestPEPS_U11): - pass - -@pytest.mark.usefixtures('z4setup') -class TestPEPS_Z4(TestPEPS_U11): - pass - -@pytest.mark.usefixtures('z2setup') -class TestPEPS_Z2(TestPEPS_U11): - pass diff --git a/quimb/tensor/test_fermion/test_numerics.py b/quimb/tensor/test_fermion/test_numerics.py deleted file mode 100644 index baada7f5..00000000 --- a/quimb/tensor/test_fermion/test_numerics.py +++ /dev/null @@ -1,169 +0,0 @@ -import pytest -import numpy as np -from quimb.tensor.fermion import ( - FermionTensor, FermionTensorNetwork, tensor_contract) -from quimb.tensor.block_interface import BondInfo, U11, U1, Z2, Z4 -from pyblock3.algebra.fermion import SparseFermionTensor - -rand = SparseFermionTensor.random - -@pytest.fixture(scope='class') -def u11setup(request): - bond1 = BondInfo({U11(0):3, U11(1,1): 3, U11(1,-1):3, U11(2):3}) - bond2 = BondInfo({U11(0):5, U11(1,1): 5, U11(1,-1):5, U11(2):5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U11(1,1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U11(-1,-1), pattern="++-").to_flat() - - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U11(1,-1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U11(-1,1), pattern="+-+").to_flat() - request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) - request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) - request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) - request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) - request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - - ab = rand((bond1, bond1), dq=U11(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=U11(1,-1), pattern="++").to_flat() - Tab = FermionTensor(ab, inds=['a','b'], 
tags=["ab"]) - Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) - Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) - request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) - yield - -@pytest.fixture(scope='class') -def u1setup(request): - bond1 = BondInfo({U1(0):3, U1(1): 3, U1(3):3, U1(2):3}) - bond2 = BondInfo({U1(0):5, U1(1): 5, U1(3):5, U1(2):5}) - - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U1(1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U1(2), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U1(-1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U1(-2), pattern="+-+").to_flat() - - request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) - request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) - request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) - request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) - request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - - ab = rand((bond1, bond1), dq=U1(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=U1(1), pattern="++").to_flat() - Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) - Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) - Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) - request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) - yield - -@pytest.fixture(scope='class') -def z4setup(request): - bond1 = BondInfo({Z4(0):3, Z4(1): 3, Z4(3):3, Z4(2):3}) - bond2 = BondInfo({Z4(0):5, Z4(1): 5, Z4(3):5, Z4(2):5}) - - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z4(1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z4(2), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z4(0), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z4(1), pattern="+-+").to_flat() - request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) - request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) - request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) - request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) - request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - - ab = rand((bond1, bond1), dq=Z4(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z4(1), pattern="++").to_flat() - Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) - Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) - Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) - request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) - yield - -@pytest.fixture(scope='class') -def z2setup(request): - bond1 = BondInfo({Z2(0):3, Z2(1): 3}) - bond2 = BondInfo({Z2(0):5, Z2(1): 5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z2(0), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z2(1), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z2(1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z2(0), pattern="+-+").to_flat() - request.cls.Tabc = Tabc = FermionTensor(abc, 
inds=['a','b','c'], tags=["abc"]) - request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) - request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) - request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) - request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - - ab = rand((bond1, bond1), dq=Z2(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z2(1), pattern="++").to_flat() - Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) - Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) - Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) - Tbc1 = FermionTensor(bc.dagger, inds=['c','b1'], tags=["bc1"]) - request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) - yield - -@pytest.mark.usefixtures('u11setup') -class TestU11: - def test_backend(self): - Tegbc = tensor_contract(self.Tabc, self.Tega, output_inds=("e","g","b", "c")) - egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) - err = (egbc - Tegbc.data).norm() - assert err < 1e-10 - - def test_contract_between(self): - tn1 = self.tn.copy() - tn1.contract_between("abc", "ega") - Tegbc = tn1["abc"].transpose("e","g","b","c") - egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) - err = (egbc - Tegbc.data).norm() - assert err < 1e-10 - - def test_contract_all(self): - result = self.tn.contract(all) - egbc = np.tensordot(self.ega, self.abc, axes=[(2,),(0,)]) - deg1 = np.tensordot(self.bcd, egbc, axes=[(0,1),(2,3)]) - ref_val = np.tensordot(self.deg, deg1, axes=[(0,1,2),]*2).data[0] - err = abs(result - ref_val) - assert err < 1e-10 - - def test_contract_ind(self): - tn1 = self.tn.copy() - tn1.contract_ind("d") - out = tn1["deg"].transpose("e","g","b","c") - egbc = np.tensordot(self.deg, self.bcd, axes=[(0,),(2,)]) - err = (egbc - out.data).norm() - assert err < 1e-10 - - def test_balance_bonds(self): - norm = self.norm - exact = norm.contract(all, optimize="auto-hq") - norm1 = norm.balance_bonds() - exact_bb = norm1.contract(all, optimize="auto-hq") - assert exact_bb == pytest.approx(exact, rel=1e-2) - for tid, tsr in norm.tensor_map.items(): - tsr1 = norm1.tensor_map[tid] - assert (tsr1-tsr).data.norm() >1e-10 - - def test_equlaize_norm(self): - norm = self.norm - exact = norm.contract(all, optimize="auto-hq") - norm1 = norm.equalize_norms() - exact_en = norm1.contract(all, optimize="auto-hq") - assert exact_en == pytest.approx(exact, rel=1e-2) - ref1 = list(norm1.tensor_map.values())[0].norm() - for tid, tsr in norm.tensor_map.items(): - tsr1 = norm1.tensor_map[tid] - assert tsr1.norm() == pytest.approx(ref1, rel=1e-2) - -@pytest.mark.usefixtures('u1setup') -class TestU1(TestU11): - pass - -@pytest.mark.usefixtures('z4setup') -class TestZ4(TestU11): - pass - -@pytest.mark.usefixtures('z2setup') -class TestZ2(TestU11): - pass diff --git a/quimb/tensor/test_fermion/test_operators.py b/quimb/tensor/test_fermion/test_operators.py deleted file mode 100644 index aa3a8785..00000000 --- a/quimb/tensor/test_fermion/test_operators.py +++ /dev/null @@ -1,225 +0,0 @@ -import pytest -import numpy as np -import itertools -from quimb.tensor.fermion_2d import FPEPS -from pyblock3.algebra.core import SubTensor -from pyblock3.algebra import fermion_ops -from quimb.tensor.block_interface import U11, U1, Z4, Z2, SparseFermionTensor -from quimb.tensor.fermion_gen import gen_mf_peps - -@pytest.fixture(scope='class') -def u11setup(request): - request.cls.t = 2 - request.cls.U = 4 - request.cls.tau = 0.1 - request.cls.mu = 0.2 - 
request.cls.symmetry = U11 - states = np.ones([1,1]) * .5 ** .5 - blocks = [SubTensor(reduced=states, q_labels=(U11(0),U11(1,1))), #0+ - SubTensor(reduced=states, q_labels=(U11(1,1),U11(0)))] #+0, eigenstate of hopping - request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - blocks=[] - states = np.ones([1,1]) * .5 - blocks = [SubTensor(reduced=states, q_labels=(U11(2), U11(0))), - SubTensor(reduced=states, q_labels=(U11(0), U11(2))), - SubTensor(reduced=-states, q_labels=(U11(1,1), U11(1,-1))), - SubTensor(reduced=states, q_labels=(U11(1,-1), U11(1,1)))] - request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - Lx = Ly = 4 - request.cls.Lx = Lx - request.cls.Ly = Ly - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry='u11') - request.cls.fac = (0.5, 0.3) - -@pytest.fixture(scope='class') -def u1setup(request): - request.cls.t = 2 - request.cls.U = 4 - request.cls.tau = 0.1 - request.cls.mu = 0.2 - request.cls.symmetry = U1 - states = np.zeros([1,2]) - states[0,0] = .5 ** .5 - blocks = [SubTensor(reduced=states, q_labels=(U1(0),U1(1))), #0+ - SubTensor(reduced=states, q_labels=(U1(1),U1(0)))] #+0, eigenstate of hopping - request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - blocks=[] - states = np.zeros([2,2]) - states[0,1] = -.5 - states[1,0] = .5 - blocks = [SubTensor(reduced=np.ones([1,1]) * .5, q_labels=(U1(2), U1(0))), - SubTensor(reduced=np.ones([1,1]) * .5, q_labels=(U1(0), U1(2))), - SubTensor(reduced=states, q_labels=(U1(1), U1(1)))] - request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - Lx = Ly = 4 - request.cls.Lx = Lx - request.cls.Ly = Ly - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry='u1') - request.cls.fac = (0.5, 0.3) - -@pytest.fixture(scope='class') -def z4setup(request): - request.cls.t = 2 - request.cls.U = 4 - request.cls.tau = 0.1 - request.cls.mu = 0.2 - request.cls.symmetry = Z4 - states = np.zeros([2,2]) - states[0,0] = .5 ** .5 - blocks = [SubTensor(reduced=states, q_labels=(Z4(0),Z4(1))), #0+ - SubTensor(reduced=states, q_labels=(Z4(1),Z4(0)))] #+0, eigenstate of hopping - request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - blocks=[] - states = np.zeros([2,2]) - states[1,0] = .5 - blocks = [SubTensor(reduced=states, q_labels=(Z4(0), Z4(0))), - SubTensor(reduced=states.T, q_labels=(Z4(0), Z4(0))), - SubTensor(reduced=-states.T, q_labels=(Z4(1), Z4(1))), - SubTensor(reduced=states, q_labels=(Z4(1), Z4(1)))] - request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - Lx = Ly = 4 - request.cls.Lx = Lx - request.cls.Ly = Ly - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry='z4') - request.cls.fac = (0.5, 0.3) - -@pytest.fixture(scope='class') -def z2setup(request): - request.cls.t = 2 - request.cls.U = 4 - request.cls.tau = 0.1 - request.cls.mu = 0.2 - request.cls.symmetry = Z2 - states = np.zeros([2,2]) - states[0,0] = .5 ** .5 - blocks = [SubTensor(reduced=states, q_labels=(Z2(0),Z2(1))), #0+ - SubTensor(reduced=states, q_labels=(Z2(1),Z2(0)))] #+0, eigenstate of hopping - request.cls.hop_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - blocks=[] - states = np.zeros([2,2]) - 
states[1,0] = .5 - blocks = [SubTensor(reduced=states, q_labels=(Z2(0), Z2(0))), - SubTensor(reduced=states.T, q_labels=(Z2(0), Z2(0))), - SubTensor(reduced=-states.T, q_labels=(Z2(1), Z2(1))), - SubTensor(reduced=states, q_labels=(Z2(1), Z2(1)))] - - request.cls.hop_exp_psi = SparseFermionTensor(blocks=blocks, pattern="++").to_flat() - - Lx = Ly = 4 - request.cls.Lx = Lx - request.cls.Ly = Ly - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry='z2') - request.cls.fac = (0.5, 0.3) - -@pytest.mark.usefixtures('u11setup') -class TestU11: - def test_hopping(self): - t = self.t - hop = fermion_ops.H1(-t, symmetry=self.symmetry) - ket = self.hop_psi - ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) - bra = ket.dagger - expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - assert expec == pytest.approx(-t, rel=1e-2) - - def test_hopping_exponential(self): - t = self.t - tau = self.tau - hop = fermion_ops.H1(-t, symmetry=self.symmetry) - #hop_exp = hop.to_exponential(-tau) - hop_exp = fermion_ops.get_flat_exponential(hop, -tau) - ket = self.hop_exp_psi - bra = ket.dagger - ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) - expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - assert expec == pytest.approx(2*t, rel=1e-2) - - ket1 = np.tensordot(hop_exp, ket, axes=((2,3),(0,1))) - expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - assert expec == pytest.approx(np.e**(-2*t*tau), rel=1e-2) - - def test_onsite_u(self): - U = self.U - uop = fermion_ops.onsite_U(U, symmetry=self.symmetry) - terms = {coo: uop for coo in itertools.product(range(self.Lx), range(self.Ly))} - psi = self.peps - state_array = self.state_array - result = psi.compute_local_expectation(terms, normalized=False, return_all=True) - for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): - ref = U if state_array[ix,iy]==3 else 0. - assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) - - def test_sz(self): - sz = fermion_ops.measure_SZ(symmetry=self.symmetry) - terms = {coo: sz for coo in itertools.product(range(self.Lx), range(self.Ly))} - result = self.peps.compute_local_expectation(terms, normalized=False, return_all=True) - ref_dic = {0:0., 1:0.5, 2:-.5, 3:0.} - for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): - state = self.state_array[ix,iy] - ref = ref_dic[state] - assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) - - def test_n(self): - nop = fermion_ops.ParticleNumber(symmetry=self.symmetry) - terms = {coo: nop for coo in itertools.product(range(self.Lx), range(self.Ly))} - result = self.peps.compute_local_expectation(terms, normalized=False, return_all=True) - ref_dic = {0:0., 1:1, 2:1, 3:2} - for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): - state = self.state_array[ix,iy] - ref = ref_dic[state] - assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) - - def test_exponential_u(self): - U = self.U - tau = self.tau - uop = fermion_ops.onsite_U(U, symmetry=self.symmetry) - uop_exp = fermion_ops.get_flat_exponential(uop, -tau) - terms = {coo: uop_exp for coo in itertools.product(range(self.Lx), range(self.Ly))} - result = self.peps.compute_local_expectation(terms, normalized=False, return_all=True) - for ix, iy in itertools.product(range(self.Lx), range(self.Ly)): - ref = np.e**(-tau*U) if self.state_array[ix,iy]==3 else 1. 
- assert ref == pytest.approx(result[(ix,iy)][0], rel=1e-2) - - def test_hubbard(self): - mu = self.mu - hop = fermion_ops.H1(-self.t, symmetry=self.symmetry) - uop = fermion_ops.onsite_U(self.U, symmetry=self.symmetry) - nop = fermion_ops.ParticleNumber(symmetry=self.symmetry) - faca, facb = self.fac - hub = fermion_ops.Hubbard(self.t, self.U, mu=mu, fac=self.fac, symmetry=self.symmetry) - ket = self.hop_exp_psi - bra = ket.dagger - - ket1 = np.tensordot(hop, ket, axes=((2,3),(0,1))) - ket1 = ket1 + faca*np.tensordot(uop, ket, axes=((-1,),(0,))) - ket1 = ket1 + facb*np.tensordot(uop, ket, axes=((-1,),(1,))).transpose([1,0]) - ket1 = ket1 + faca*mu*np.tensordot(nop, ket, axes=((-1,),(0,))) - ket1 = ket1 + facb*mu*np.tensordot(nop, ket, axes=((-1,),(1,))).transpose([1,0]) - expec = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - - ket1 = np.tensordot(hub, ket, axes=((2,3),(0,1))) - expec1 = np.tensordot(bra, ket1, axes=((1,0),(0,1))).data[0] - assert expec == pytest.approx(expec1, rel=1e-2) - -@pytest.mark.usefixtures('u1setup') -class TestU1(TestU11): - pass - -@pytest.mark.usefixtures('z4setup') -class TestZ4(TestU11): - pass - -@pytest.mark.usefixtures('z2setup') -class TestZ2(TestU11): - pass From b4b949653120adfe46a8a61bfa58919e7ac14bc0 Mon Sep 17 00:00:00 2001 From: yangcal Date: Thu, 6 May 2021 12:47:16 -0700 Subject: [PATCH 52/61] remove test files --- docs/examples/debug.ipynb | 4801 ------------------------------ docs/examples/debug.py | 105 - docs/examples/ex_fermion2d.ipynb | 433 --- 3 files changed, 5339 deletions(-) delete mode 100644 docs/examples/debug.ipynb delete mode 100644 docs/examples/debug.py delete mode 100644 docs/examples/ex_fermion2d.ipynb diff --git a/docs/examples/debug.ipynb b/docs/examples/debug.ipynb deleted file mode 100644 index 0739e88d..00000000 --- a/docs/examples/debug.ipynb +++ /dev/null @@ -1,4801 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[[0. 0.]\n", - " [0. 0.]]\n", - "\n", - " [[0. 0.]\n", - " [0. 0.]]]\n", - "\n", - "\n", - " [[[0. 0.]\n", - " [1. 0.]]\n", - "\n", - " [[0. 0.]\n", - " [0. 
-      "   [0. 0.]]]]\n"
-     ]
-    }
-   ],
-   "source": [
-    "import numpy as np\n",
-    "import itertools\n",
-    "from quimb.tensor.fermion_2d_tebd import Hubbard2D, SimpleUpdate\n",
-    "from pyblock3.algebra import fermion_operators as ops\n",
-    "\n",
-    "t=1\n",
-    "u=4\n",
-    "Lx = 4\n",
-    "Ly = 1\n",
-    "#mu = -0.9\n",
-    "mu = 0\n",
-    "Ham = Hubbard2D(t, u, Lx, Ly, mu=mu)\n",
-    "#efci = -5.702748483462062\n",
-    "\n",
-    "state_array = np.zeros([Lx,Ly])\n",
-    "state_array[0,0] = state_array[2,0] = 1\n",
-    "state_array[1,0] = state_array[3,0] = 2\n",
-    "\n",
-    "from quimb.tensor.fermion_2d import gen_mf_peps\n",
-    "\n",
-    "psi = gen_mf_peps(state_array) # this is now a 2d mean field PEPS\n",
-    "\n",
-    "\n",
-    "sz = ops.measure_sz()\n",
-    "nop = ops.count_n()\n",
-    "\n",
-    "sz_ops = {(ix,iy): sz for ix, iy in itertools.product(range(Lx), range(Ly))}\n",
-    "n_ops = {(ix,iy): nop for ix, iy in itertools.product(range(Lx), range(Ly))}\n",
-    "\n",
-    "\n",
-    "book = {(0,0):\"0\", (0,1):\"+-\", (1,0):\"+\", (1,1):\"-\"}\n",
-    "\n",
-    "def print_block(blk):\n",
-    "    qlab= [iq.n for iq in blk.q_labels]\n",
-    "    ind = np.where(abs(np.asarray(blk)) > 1e-20)\n",
-    "    need_print = False\n",
-    "    for ixs in zip(*ind):\n",
-    "        val = np.asarray(blk)[ixs]\n",
-    "        desc = \"|\"\n",
-    "        for ix, s in enumerate(ixs):\n",
-    "            desc += book[(qlab[ix], s)]\n",
-    "            if ix != len(ixs)-1:\n",
-    "                desc += \",\"\n",
-    "            else:\n",
-    "                desc += \">\"\n",
-    "\n",
-    "\n",
-    "        if (desc.count(\"+\"), desc.count(\"-\")) != (2,2):\n",
-    "            desc = str(val) + desc\n",
-    "            print(desc)\n",
-    "            need_print = True\n",
-    "    return need_print\n",
-    "\n",
-    "\n",
-    "ket = psi.contract(all)\n",
-    "tsr = ket.data.to_sparse()\n",
-    "for iblk in tsr:\n",
-    "    print(np.asarray(iblk))\n",
-    "    "
-   ]
-  },
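The deleted cell above builds `sz_ops` and `n_ops` but, in the portion of the notebook kept here, never evaluates them; they are intended for `compute_local_expectation`, the same call exercised by the operator tests earlier in this series. A minimal usage sketch (illustrative only; it reuses just the calls and signatures visible elsewhere in this patch series, and the variable names are mine):

    import itertools
    import numpy as np
    from quimb.tensor.fermion_2d import gen_mf_peps
    from pyblock3.algebra import fermion_operators as ops

    Lx, Ly = 4, 1
    state_array = np.zeros([Lx, Ly])
    state_array[0, 0] = state_array[2, 0] = 1      # singly occupied, spin up
    state_array[1, 0] = state_array[3, 0] = 2      # singly occupied, spin down
    psi = gen_mf_peps(state_array)                 # mean-field fermionic PEPS

    sz_ops = {(ix, iy): ops.measure_sz() for ix, iy in itertools.product(range(Lx), range(Ly))}
    n_ops = {(ix, iy): ops.count_n() for ix, iy in itertools.product(range(Lx), range(Ly))}

    # return_all=True maps each site to a tuple whose first entry is the local
    # expectation value, as used by the tests above
    sz_vals = psi.compute_local_expectation(sz_ops, normalized=False, return_all=True)
    n_vals = psi.compute_local_expectation(n_ops, normalized=False, return_all=True)
    for coo in sorted(sz_ops):
        print(coo, sz_vals[coo][0], n_vals[coo][0])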
- "-6.1521063335740584e-18|-,0,+-,->\n", - "1.4484136888054943e-17|-,+-,+-,+>\n", - "Ending Cycle6\n", - "2.908695151580684e-20|+,0,0,->\n", - "-2.2924658772524627e-18|+,0,+-,+>\n", - "-3.8589308032096353e-20|+,+-,0,+>\n", - "-3.976639089345718e-20|+,+-,+-,+>\n", - "-4.986719666218777e-17|-,0,0,+>\n", - "6.765750669177418e-19|-,0,0,->\n", - "-6.828495888116361e-18|-,0,+-,->\n", - "-2.3982325311725744e-20|-,+-,0,->\n", - "4.9967845243611465e-17|-,+-,+-,+>\n", - "Ending Cycle7\n", - "2.175197919504413e-20|+,0,0,+>\n", - "7.574690141173284e-20|+,0,0,->\n", - "-3.6077155181618874e-18|+,0,+-,+>\n", - "-1.0006423748782682e-19|+,+-,0,+>\n", - "-4.20818487429506e-20|+,+-,+-,+>\n", - "-5.952098621759206e-17|-,0,0,+>\n", - "7.73516336625335e-19|-,0,0,->\n", - "-7.429876512571447e-18|-,0,+-,->\n", - "-3.198743535381869e-20|-,+-,0,->\n", - "5.915893330927149e-17|-,+-,+-,+>\n", - "Ending Cycle8\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 4%|▍ | 11/278 [00:00<00:16, 16.31it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-6.206033872474593e-20|+,0,0,+>\n", - "8.698661157653424e-20|+,0,0,->\n", - "-6.309972456427634e-18|+,0,+-,+>\n", - "1.4968858228284988e-19|+,+-,0,+>\n", - "-4.0063902616858844e-20|+,+-,+-,+>\n", - "-2.0790254255595707e-18|-,0,0,+>\n", - "-2.1892597925028564e-19|-,0,0,->\n", - "-7.535401938273105e-18|-,0,+-,->\n", - "-4.998479575074961e-20|-,+-,0,->\n", - "3.5641379966027054e-17|-,+-,+-,+>\n", - "Ending Cycle9\n", - "-5.41148901869004e-20|+,0,0,->\n", - "-5.8221409211995494e-18|+,0,+-,+>\n", - "1.3202723437734956e-19|+,+-,0,+>\n", - "-6.062230832952462e-20|+,+-,+-,+>\n", - "4.1796090700284833e-17|-,0,0,+>\n", - "6.84785472116851e-20|-,0,0,->\n", - "-7.991610384398805e-18|-,0,+-,->\n", - "1.0163934172787189e-20|-,+-,0,->\n", - "-2.750414584771832e-17|-,+-,+-,+>\n", - "Ending Cycle10\n", - "-3.953614635798957e-20|+,0,0,+>\n", - "-1.258520990524521e-19|+,0,0,->\n", - "-1.0250466905429107e-17|+,0,+-,+>\n", - "-4.9263830868745044e-18|+,+-,0,+>\n", - "-1.5912868150158506e-19|+,+-,+-,+>\n", - "-2.6736252299797266e-17|-,0,0,+>\n", - "2.1167298745725418e-19|-,0,0,->\n", - "-8.406229755636134e-18|-,0,+-,->\n", - "4.150228020432218e-17|-,+-,+-,+>\n", - "Ending Cycle11\n", - "-9.050936096842016e-20|+,0,0,+>\n", - "-2.282427807102823e-19|+,0,0,->\n", - "-5.22532184767453e-18|+,0,+-,+>\n", - "5.995421913117511e-19|+,+-,0,+>\n", - "1.7663317826037022e-19|+,+-,+-,+>\n", - "-1.3456606560966035e-20|+,+-,+-,->\n", - "-1.1670287940577793e-17|-,0,0,+>\n", - "-2.0217502723837946e-19|-,0,0,->\n", - "-1.1263655850878233e-17|-,0,+-,->\n", - "-7.923467209229067e-18|-,+-,+-,+>\n", - "Ending Cycle12\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 5%|▌ | 15/278 [00:00<00:16, 16.34it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-3.3979425706569436e-19|+,0,0,->\n", - "-1.5312738524674893e-18|+,0,+-,+>\n", - "5.956665817376128e-19|+,+-,0,+>\n", - "1.7856318633860574e-19|+,+-,+-,+>\n", - "-1.6305606524384538e-20|+,+-,+-,->\n", - "1.5389602794510618e-16|-,0,0,+>\n", - "-2.6118229497523656e-19|-,0,0,->\n", - "-1.1646390677468351e-17|-,0,+-,->\n", - "4.4315618564459675e-20|-,+-,0,->\n", - "-1.4056066691570284e-16|-,+-,+-,+>\n", - "Ending Cycle13\n", - "-1.4544453536893518e-19|+,0,0,+>\n", - "-4.141031211482446e-19|+,0,0,->\n", - "-2.8298429910521356e-18|+,0,+-,+>\n", - "1.0483900463571322e-18|+,+-,0,+>\n", - 
"1.6792642183213642e-19|+,+-,+-,+>\n", - "-3.349841255593494e-20|+,+-,+-,->\n", - "2.4464220222694207e-16|-,0,0,+>\n", - "-7.94240440400761e-19|-,0,0,->\n", - "-1.2033630370552614e-17|-,0,+-,->\n", - "9.376886278099667e-20|-,+-,0,->\n", - "-2.1871808387021538e-16|-,+-,+-,+>\n", - "Ending Cycle14\n", - "7.456487043041089e-20|+,0,0,+>\n", - "-4.339293605925922e-19|+,0,0,->\n", - "-3.5814336981627727e-19|+,0,+-,+>\n", - "7.319982893506588e-19|+,+-,0,+>\n", - "1.7316974221948745e-18|+,+-,+-,+>\n", - "-4.6253463866648805e-20|+,+-,+-,->\n", - "2.5046305310646436e-16|-,0,0,+>\n", - "-6.986806197967268e-19|-,0,0,->\n", - "-8.472441224270363e-18|-,0,+-,->\n", - "1.5350393922261541e-19|-,+-,0,->\n", - "-2.1804739835018946e-16|-,+-,+-,+>\n", - "Ending Cycle15\n", - "-1.7283790272467e-19|+,0,0,+>\n", - "-1.0466711947588817e-19|+,0,0,->\n", - "6.397675947610903e-18|+,0,+-,+>\n", - "1.564217396097236e-18|+,+-,0,+>\n", - "1.765047792384793e-18|+,+-,+-,+>\n", - "-3.683220389738631e-20|+,+-,+-,->\n", - "3.8328238121600567e-16|-,0,0,+>\n", - "-1.0293768255775625e-18|-,0,0,->\n", - "-8.736232260229355e-18|-,0,+-,->\n", - "1.3457473558290438e-19|-,+-,0,->\n", - "-3.9758740605195705e-16|-,+-,+-,+>\n", - "Ending Cycle16\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 7%|▋ | 19/278 [00:01<00:15, 16.50it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-2.3678184427595324e-19|+,0,0,+>\n", - "3.351708377632563e-19|+,0,0,->\n", - "3.482961904568107e-19|+,0,+-,+>\n", - "2.3187399087173274e-18|+,+-,0,+>\n", - "1.8341137951460192e-18|+,+-,+-,+>\n", - "-2.1979502806192116e-20|+,+-,+-,->\n", - "3.9389063688269594e-16|-,0,0,+>\n", - "3.5745047182580076e-18|-,0,0,->\n", - "-5.2132622026046446e-18|-,0,+-,->\n", - "1.5017531333784842e-19|-,+-,0,->\n", - "-3.816319210763194e-16|-,+-,+-,+>\n", - "Ending Cycle17\n", - "1.089347724745079e-19|+,0,0,+>\n", - "-1.2551284633971573e-20|+,0,0,->\n", - "3.642381619225696e-18|+,0,+-,+>\n", - "2.045381570815359e-18|+,+-,0,+>\n", - "1.8307761157967957e-18|+,+-,+-,+>\n", - "4.4299083440145985e-16|-,0,0,+>\n", - "1.0489463031070272e-17|-,0,0,->\n", - "1.5075067231790956e-19|-,0,+-,->\n", - "-3.827786916958857e-19|-,+-,0,->\n", - "-4.170670840011947e-16|-,+-,+-,+>\n", - "Ending Cycle18\n", - "-1.6305273566996916e-19|+,0,0,+>\n", - "8.568632867654818e-19|+,0,0,->\n", - "1.0875172010993105e-17|+,0,+-,+>\n", - "2.694959104088856e-18|+,+-,0,+>\n", - "1.9124213564779536e-18|+,+-,+-,+>\n", - "-1.046277109163695e-20|+,+-,+-,->\n", - "5.291412722288259e-16|-,0,0,+>\n", - "1.914365405671617e-17|-,0,0,->\n", - "7.897759617584958e-18|-,0,+-,->\n", - "-1.3208964272715387e-18|-,+-,0,->\n", - "-4.3867545949917616e-16|-,+-,+-,+>\n", - "Ending Cycle19\n", - "-2.898321257235676e-19|+,0,0,+>\n", - "9.537196472705988e-19|+,0,0,->\n", - "-1.593341273428101e-18|+,0,+-,+>\n", - "3.412912308206144e-18|+,+-,0,+>\n", - "2.0668986539055894e-18|+,+-,+-,+>\n", - "6.326550712718716e-16|-,0,0,+>\n", - "3.1942847892465277e-17|-,0,0,->\n", - "1.4357913716959002e-17|-,0,+-,->\n", - "-2.4681313465559895e-18|-,+-,0,->\n", - "-5.769988809058549e-16|-,+-,+-,+>\n", - "Ending Cycle20\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 8%|▊ | 23/278 [00:01<00:16, 15.80it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.7214460838763296e-19|+,0,0,+>\n", - "1.46007270172818e-18|+,0,0,->\n", - "-2.2049899716256475e-18|+,0,+-,+>\n", - 
"2.5974795816844535e-18|+,+-,0,+>\n", - "2.351875929839277e-18|+,+-,+-,+>\n", - "-5.506302839841519e-20|+,+-,+-,->\n", - "7.204807916286706e-16|-,0,0,+>\n", - "5.1396715695506545e-17|-,0,0,->\n", - "2.50369329499009e-17|-,0,+-,->\n", - "-4.1834130982041465e-18|-,+-,0,->\n", - "-5.950214497565966e-16|-,+-,+-,+>\n", - "Ending Cycle21\n", - "-2.941276290792111e-20|+,0,0,+>\n", - "1.2978645590420262e-18|+,0,0,->\n", - "2.2319469066834516e-18|+,0,+-,+>\n", - "3.1005375911011856e-18|+,+-,0,+>\n", - "2.543475962287738e-18|+,+-,+-,+>\n", - "-1.6218809509822006e-19|+,+-,+-,->\n", - "7.241115451281419e-16|-,0,0,+>\n", - "8.294945364610157e-17|-,0,0,->\n", - "3.974447313717897e-17|-,0,+-,->\n", - "-6.841010822823344e-18|-,+-,0,->\n", - "-6.010531441888892e-16|-,+-,+-,+>\n", - "Ending Cycle22\n", - "-1.6981234810751007e-19|+,0,0,+>\n", - "1.6006404660121053e-18|+,0,0,->\n", - "-1.5964878031734637e-18|+,0,+-,+>\n", - "1.5049920847812507e-17|+,+-,0,+>\n", - "2.664671873844335e-18|+,+-,+-,+>\n", - "-2.011338429259276e-19|+,+-,+-,->\n", - "1.0483071075465505e-15|-,0,0,+>\n", - "1.0791623757069506e-16|-,0,0,->\n", - "5.051351740683385e-17|-,0,+-,->\n", - "-3.937859891119081e-18|-,+-,0,->\n", - "-7.331336327707496e-16|-,+-,+-,+>\n", - "Ending Cycle23\n", - "4.50602860125725e-19|+,0,0,+>\n", - "7.952223961802406e-19|+,0,0,->\n", - "-6.308955386990692e-18|+,0,+-,+>\n", - "2.432126592179979e-18|+,+-,0,+>\n", - "2.8206911659755095e-18|+,+-,+-,+>\n", - "-3.4801542165378114e-19|+,+-,+-,->\n", - "4.3703829485712083e-16|-,0,0,+>\n", - "1.1051455896221495e-16|-,0,0,->\n", - "4.9574806673174024e-17|-,0,+-,->\n", - "8.247088228123455e-19|-,+-,0,->\n", - "-1.074647535509776e-16|-,+-,+-,+>\n", - "Ending Cycle24\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 10%|▉ | 27/278 [00:01<00:16, 15.49it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-3.020450361386837e-19|+,0,0,+>\n", - "2.1132493403554158e-18|+,0,0,->\n", - "-9.873794233571388e-18|+,0,+-,+>\n", - "3.591910962816781e-18|+,+-,0,+>\n", - "2.7816546675253794e-18|+,+-,+-,+>\n", - "-4.638189812926381e-19|+,+-,+-,->\n", - "-2.2171034337352293e-16|-,0,0,+>\n", - "1.1190653134683816e-16|-,0,0,->\n", - "4.8872786831787115e-17|-,0,+-,->\n", - "1.286125091227644e-18|-,+-,0,->\n", - "5.671482193670399e-16|-,+-,+-,+>\n", - "Ending Cycle25\n", - "1.6559153535952385e-18|+,0,0,->\n", - "4.490386959030662e-17|+,0,+-,+>\n", - "5.025953335534338e-17|+,+-,0,+>\n", - "2.8213381122593666e-18|+,+-,+-,+>\n", - "-5.344714157000752e-19|+,+-,+-,->\n", - "-9.205908098730935e-16|-,0,0,+>\n", - "1.1373487829562784e-16|-,0,0,->\n", - "4.8094777415007675e-17|-,0,+-,->\n", - "1.7365969213090023e-18|-,+-,0,->\n", - "1.26855276074545e-15|-,+-,+-,+>\n", - "-1.908467102236751e-20|-,+-,+-,->\n", - "Ending Cycle26\n", - "-3.631040636460715e-19|+,0,0,+>\n", - "2.6421464613548787e-18|+,0,0,->\n", - "5.0297447185136064e-17|+,0,+-,+>\n", - "4.308864627408166e-17|+,+-,0,+>\n", - "3.0394995904512218e-18|+,+-,+-,+>\n", - "-6.51593959655515e-19|+,+-,+-,->\n", - "-1.798064411359649e-15|-,0,0,+>\n", - "1.1547901182415857e-16|-,0,0,->\n", - "4.7310933889640457e-17|-,0,+-,->\n", - "2.2212205709645063e-18|-,+-,0,->\n", - "2.19149823496658e-15|-,+-,+-,+>\n", - "-3.383030762176656e-20|-,+-,+-,->\n", - "Ending Cycle27\n", - "-6.544774159274846e-19|+,0,0,+>\n", - "2.9811385039554718e-18|+,0,0,->\n", - "5.216758387675893e-17|+,0,+-,+>\n", - "4.546400999824044e-17|+,+-,0,+>\n", - "3.381637935434836e-18|+,+-,+-,+>\n", - 
"-7.631674562224796e-19|+,+-,+-,->\n", - "-2.6608756930054038e-15|-,0,0,+>\n", - "1.1709621816093462e-16|-,0,0,->\n", - "4.654008668576709e-17|-,0,+-,->\n", - "2.6958776466999315e-18|-,+-,0,->\n", - "2.9801903541857302e-15|-,+-,+-,+>\n", - "-4.8804378660976634e-20|-,+-,+-,->\n", - "Ending Cycle28\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 11%|█ | 31/278 [00:01<00:15, 15.87it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-3.2740286601379065e-19|+,0,0,+>\n", - "3.6181622230952764e-18|+,0,0,->\n", - "4.975707070305895e-17|+,0,+-,+>\n", - "4.6828971305687844e-17|+,+-,0,+>\n", - "3.6589305668401086e-18|+,+-,+-,+>\n", - "-7.824756542243542e-19|+,+-,+-,->\n", - "-3.492236575128159e-15|-,0,0,+>\n", - "1.1868818089352798e-16|-,0,0,->\n", - "4.575709501242393e-17|-,0,+-,->\n", - "3.210254072742331e-18|-,+-,0,->\n", - "3.894358228058044e-15|-,+-,+-,+>\n", - "-4.368924133452725e-20|-,+-,+-,->\n", - "Ending Cycle29\n", - "-3.198029417483776e-20|+,0,0,+>\n", - "3.1427589711616758e-18|+,0,0,->\n", - "5.0311639283780974e-17|+,0,+-,+>\n", - "4.7539754569997343e-17|+,+-,0,+>\n", - "3.711913561109305e-18|+,+-,+-,+>\n", - "-9.67520055425027e-19|+,+-,+-,->\n", - "-4.326372584497513e-15|-,0,0,+>\n", - "1.2046030762911578e-16|-,0,0,->\n", - "4.506910109387626e-17|-,0,+-,->\n", - "3.764952317864687e-18|-,+-,0,->\n", - "4.757243917119685e-15|-,+-,+-,+>\n", - "-5.861058380639513e-20|-,+-,+-,->\n", - "Ending Cycle30\n", - "-2.438786942168298e-19|+,0,0,+>\n", - "1.8177018043986195e-18|+,0,0,->\n", - "7.422493243782107e-17|+,0,+-,+>\n", - "4.887999963656183e-17|+,+-,0,+>\n", - "3.7342949105335556e-18|+,+-,+-,+>\n", - "-1.4771193510269404e-18|+,+-,+-,->\n", - "-5.294001386200611e-15|-,0,0,+>\n", - "1.2284517137776632e-16|-,0,0,->\n", - "4.4650584029455506e-17|-,0,+-,->\n", - "4.2626617608803635e-18|-,+-,0,->\n", - "5.679493343221942e-15|-,+-,+-,+>\n", - "-8.407456662182579e-20|-,+-,+-,->\n", - "Ending Cycle31\n", - "8.752462792112036e-19|+,0,0,+>\n", - "2.0483979302211045e-18|+,0,0,->\n", - "6.983004811598494e-17|+,0,+-,+>\n", - "4.690513055357274e-17|+,+-,0,+>\n", - "3.882003732928379e-18|+,+-,+-,+>\n", - "-1.5769148556045291e-18|+,+-,+-,->\n", - "-6.158890497662757e-15|-,0,0,+>\n", - "1.2638508353532459e-16|-,0,0,->\n", - "4.449247367293366e-17|-,0,+-,->\n", - "4.649505153390301e-18|-,+-,0,->\n", - "6.554261024850796e-15|-,+-,+-,+>\n", - "-7.330332932798533e-20|-,+-,+-,->\n", - "Ending Cycle32\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 13%|█▎ | 35/278 [00:02<00:14, 16.51it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.8849033786683545e-19|+,0,0,+>\n", - "9.551602393752595e-19|+,0,0,->\n", - "7.35938651080324e-17|+,0,+-,+>\n", - "4.6264168930273687e-17|+,+-,0,+>\n", - "4.21532647492211e-18|+,+-,+-,+>\n", - "-1.7698836483946595e-18|+,+-,+-,->\n", - "-6.849759656954023e-15|-,0,0,+>\n", - "1.2938044078957348e-16|-,0,0,->\n", - "4.429996579262665e-17|-,0,+-,->\n", - "4.872712428046395e-18|-,+-,0,->\n", - "7.37000135818254e-15|-,+-,+-,+>\n", - "-1.0297585412495506e-19|-,+-,+-,->\n", - "Ending Cycle33\n", - "-6.128425056345591e-20|+,0,0,+>\n", - "2.0361968183149893e-18|+,0,0,->\n", - "7.429183043355533e-17|+,0,+-,+>\n", - "4.7149066760230465e-17|+,+-,0,+>\n", - "4.508115458040475e-18|+,+-,+-,+>\n", - "-2.064617546614945e-18|+,+-,+-,->\n", - "-7.857354714148939e-15|-,0,0,+>\n", - "1.1944655584053697e-16|-,0,0,->\n", - 
"3.71572888054612e-17|-,0,+-,->\n", - "5.184333064861759e-18|-,+-,0,->\n", - "8.387620822926613e-15|-,+-,+-,+>\n", - "-1.4727635198575087e-19|-,+-,+-,->\n", - "Ending Cycle34\n", - "1.1643514569091799e-20|+,0,0,+>\n", - "9.70857804299768e-19|+,0,0,->\n", - "7.46278577234086e-17|+,0,+-,+>\n", - "4.758875230271613e-17|+,+-,0,+>\n", - "5.1010339358022026e-18|+,+-,+-,+>\n", - "-2.4762501347184528e-18|+,+-,+-,->\n", - "-8.715353138984001e-15|-,0,0,+>\n", - "8.123554779566762e-17|-,0,0,->\n", - "1.4092869612105962e-17|-,0,+-,->\n", - "7.664885918866752e-18|-,+-,0,->\n", - "9.231250342672013e-15|-,+-,+-,+>\n", - "-1.6991067060143366e-19|-,+-,+-,->\n", - "Ending Cycle35\n", - "2.1410747698237026e-18|+,0,0,->\n", - "9.991359728554694e-17|+,0,+-,+>\n", - "4.831695250720718e-17|+,+-,0,+>\n", - "5.7569222044848205e-18|+,+-,+-,+>\n", - "-3.1556078093786543e-18|+,+-,+-,->\n", - "-9.758563213133033e-15|-,0,0,+>\n", - "7.626707035062633e-17|-,0,0,->\n", - "1.1372869454887414e-17|-,0,+-,->\n", - "1.517317369327874e-17|-,+-,0,->\n", - "1.025699165617877e-14|-,+-,+-,+>\n", - "-1.6713585029375977e-19|-,+-,+-,->\n", - "Ending Cycle36\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 14%|█▍ | 39/278 [00:02<00:14, 16.57it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.50972293218981e-18|+,0,0,+>\n", - "2.1985182254920167e-18|+,0,0,->\n", - "9.496391828765099e-17|+,0,+-,+>\n", - "4.527445275068825e-17|+,+-,0,+>\n", - "5.667004827947213e-18|+,+-,+-,+>\n", - "-3.471517902300867e-18|+,+-,+-,->\n", - "-1.044754637165177e-14|-,0,0,+>\n", - "-1.866191350796222e-17|-,0,0,->\n", - "-3.731005093416422e-17|-,0,+-,->\n", - "1.7278885619062722e-17|-,+-,0,->\n", - "1.1098374244097077e-14|-,+-,+-,+>\n", - "-1.5492477282339814e-19|-,+-,+-,->\n", - "Ending Cycle37\n", - "5.564032775438046e-19|+,0,0,+>\n", - "3.873456329974483e-18|+,0,0,->\n", - "7.895137726660091e-17|+,0,+-,+>\n", - "4.388565495818677e-17|+,+-,0,+>\n", - "5.5137270890376735e-18|+,+-,+-,+>\n", - "-3.353141532488888e-18|+,+-,+-,->\n", - "-1.1388217676999242e-14|-,0,0,+>\n", - "-3.060091239313771e-17|-,0,0,->\n", - "-3.9234545662795774e-17|-,0,+-,->\n", - "1.9334494763813393e-17|-,+-,0,->\n", - "1.1976017851715907e-14|-,+-,+-,+>\n", - "-1.6079214281468584e-19|-,+-,+-,->\n", - "Ending Cycle38\n", - "9.39370867326811e-19|+,0,0,+>\n", - "4.2558005330126524e-18|+,0,0,->\n", - "8.297803832245276e-17|+,0,+-,+>\n", - "4.190767574020409e-17|+,+-,0,+>\n", - "5.318490759762339e-18|+,+-,+-,+>\n", - "-3.537892127451715e-18|+,+-,+-,->\n", - "-1.239868039500026e-14|-,0,0,+>\n", - "-4.087513994975697e-17|-,0,0,->\n", - "-4.0436967017827194e-17|-,0,+-,->\n", - "2.1990803124703248e-17|-,+-,0,->\n", - "1.3000074858419404e-14|-,+-,+-,+>\n", - "-2.2189803819929387e-19|-,+-,+-,->\n", - "Ending Cycle39\n", - "-7.764541276323103e-19|+,0,0,+>\n", - "2.5016398761100215e-18|+,0,0,->\n", - "7.971945724884877e-17|+,0,+-,+>\n", - "4.281952461950571e-17|+,+-,0,+>\n", - "6.468117586887321e-18|+,+-,+-,+>\n", - "-3.933781762665464e-18|+,+-,+-,->\n", - "-1.3409898325946613e-14|-,0,0,+>\n", - "-5.176248792212455e-17|-,0,0,->\n", - "-3.950448875297221e-17|-,0,+-,->\n", - "2.443488442390853e-17|-,+-,0,->\n", - "1.4013783063141e-14|-,+-,+-,+>\n", - "-2.2708405967273942e-19|-,+-,+-,->\n", - "Ending Cycle40\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 15%|█▌ | 43/278 [00:02<00:14, 16.44it/s]" - ] - }, - { - "name": "stdout", - "output_type": 
"stream", - "text": [ - "-5.366639228707132e-19|+,0,0,+>\n", - "3.411295787779019e-18|+,0,0,->\n", - "9.225555364836388e-17|+,0,+-,+>\n", - "4.219191135333694e-17|+,+-,0,+>\n", - "5.914424379653564e-18|+,+-,+-,+>\n", - "-4.424695961058212e-18|+,+-,+-,->\n", - "-1.4300177346213582e-14|-,0,0,+>\n", - "-6.95463465779613e-17|-,0,0,->\n", - "1.5294215202898667e-17|-,0,+-,->\n", - "2.7260397624393634e-17|-,+-,0,->\n", - "1.4894895248867742e-14|-,+-,+-,+>\n", - "-2.151109031954263e-19|-,+-,+-,->\n", - "Ending Cycle41\n", - "-1.1251632645411698e-18|+,0,0,+>\n", - "2.8301112891337474e-18|+,0,0,->\n", - "7.504326831452469e-17|+,0,+-,+>\n", - "4.3644423843142764e-17|+,+-,0,+>\n", - "5.126897585071625e-18|+,+-,+-,+>\n", - "-3.912852197110543e-18|+,+-,+-,->\n", - "-1.5258231497186227e-14|-,0,0,+>\n", - "-8.483072704561714e-17|-,0,0,->\n", - "6.469248377556951e-17|-,0,+-,->\n", - "3.0858106440756976e-17|-,+-,0,->\n", - "1.5807350615757255e-14|-,+-,+-,+>\n", - "-1.1483570753572312e-19|-,+-,+-,->\n", - "Ending Cycle42\n", - "2.60971610253204e-19|+,0,0,+>\n", - "3.0426359487684655e-18|+,0,0,->\n", - "7.534345619257638e-17|+,0,+-,+>\n", - "4.10638508419565e-17|+,+-,0,+>\n", - "5.534879770278638e-18|+,+-,+-,+>\n", - "-4.197872704173458e-18|+,+-,+-,->\n", - "-1.610573702870599e-14|-,0,0,+>\n", - "-1.0353772932875369e-16|-,0,0,->\n", - "1.27306458212477e-16|-,0,+-,->\n", - "3.43082196452117e-17|-,+-,0,->\n", - "1.6790451738214594e-14|-,+-,+-,+>\n", - "-6.833569150110028e-20|-,+-,+-,->\n", - "Ending Cycle43\n", - "-4.2674537891616655e-19|+,0,0,+>\n", - "3.631736950461978e-18|+,0,0,->\n", - "7.865955687604689e-17|+,0,+-,+>\n", - "4.212796695912904e-17|+,+-,0,+>\n", - "4.1492974922051495e-18|+,+-,+-,+>\n", - "-4.202158950218014e-18|+,+-,+-,->\n", - "-1.7133967483077505e-14|-,0,0,+>\n", - "-1.232935653801779e-16|-,0,0,->\n", - "1.9709922140783236e-16|-,0,+-,->\n", - "3.8525550259536755e-17|-,+-,0,->\n", - "1.7812848094423375e-14|-,+-,+-,+>\n", - "1.701662477694554e-19|-,+-,+-,->\n", - "Ending Cycle44\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 17%|█▋ | 47/278 [00:02<00:13, 16.54it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-4.2121372162559277e-19|+,0,0,+>\n", - "6.0645468652673535e-18|+,0,0,->\n", - "7.16794680156969e-17|+,0,+-,+>\n", - "4.163216698386798e-17|+,+-,0,+>\n", - "3.647363835133795e-18|+,+-,+-,+>\n", - "-3.804411806053858e-18|+,+-,+-,->\n", - "-1.798777840564797e-14|-,0,0,+>\n", - "-1.569671453452574e-16|-,0,0,->\n", - "3.002927130394654e-16|-,0,+-,->\n", - "4.276979046139204e-17|-,+-,0,->\n", - "1.878904691085627e-14|-,+-,+-,+>\n", - "2.5630306235361453e-19|-,+-,+-,->\n", - "Ending Cycle45\n", - "1.9895723695152975e-19|+,0,0,+>\n", - "4.1345585495726716e-18|+,0,0,->\n", - "6.231210541269119e-17|+,0,+-,+>\n", - "4.068009313286328e-17|+,+-,0,+>\n", - "2.901625011197656e-18|+,+-,+-,+>\n", - "-4.0559233344305e-18|+,+-,+-,->\n", - "-1.895339497604475e-14|-,0,0,+>\n", - "-1.7283203244669827e-16|-,0,0,->\n", - "3.2543536541927475e-16|-,0,+-,->\n", - "3.0709964103383466e-17|-,+-,0,->\n", - "1.7324884380803657e-14|-,+-,+-,+>\n", - "2.471132057291415e-19|-,+-,+-,->\n", - "Ending Cycle46\n", - "2.2200266863135962e-20|+,0,0,+>\n", - "4.033640553680407e-18|+,0,0,->\n", - "5.3020495565413355e-17|+,0,+-,+>\n", - "4.044218609211739e-17|+,+-,0,+>\n", - "2.6814461465535803e-18|+,+-,+-,+>\n", - "-3.764378141632264e-18|+,+-,+-,->\n", - "-1.8770248229468446e-14|-,0,0,+>\n", - "-1.8943695986087842e-16|-,0,0,->\n", - 
"3.4795784715427257e-16|-,0,+-,->\n", - "2.895783402893943e-17|-,+-,0,->\n", - "1.594651192524875e-14|-,+-,+-,+>\n", - "2.0756490051277583e-19|-,+-,+-,->\n", - "Ending Cycle47\n", - "2.1279270643174437e-19|+,0,0,+>\n", - "1.6211898619403742e-18|+,0,0,->\n", - "5.316325332112674e-17|+,0,+-,+>\n", - "3.9074316557124756e-17|+,+-,0,+>\n", - "2.2342716604190907e-18|+,+-,+-,+>\n", - "-4.063843206427392e-18|+,+-,+-,->\n", - "-1.8920688061298584e-14|-,0,0,+>\n", - "-2.066359346977252e-16|-,0,0,->\n", - "3.673308562659293e-16|-,0,+-,->\n", - "2.723762193917093e-17|-,+-,0,->\n", - "1.45191521566901e-14|-,+-,+-,+>\n", - "1.5375202333847204e-19|-,+-,+-,->\n", - "Ending Cycle48\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 18%|█▊ | 51/278 [00:03<00:13, 16.49it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-2.0683945043864895e-19|+,0,0,+>\n", - "1.7911722348905956e-18|+,0,0,->\n", - "3.298442519216627e-17|+,0,+-,+>\n", - "3.901583726746302e-17|+,+-,0,+>\n", - "3.056837888817425e-18|+,+-,+-,+>\n", - "-4.466534367600105e-18|+,+-,+-,->\n", - "-1.8974038794627973e-14|-,0,0,+>\n", - "-2.2415955452812787e-16|-,0,0,->\n", - "3.869350175444845e-16|-,0,+-,->\n", - "2.5588035028619482e-17|-,+-,0,->\n", - "1.3280619063934872e-14|-,+-,+-,+>\n", - "1.608926902178714e-19|-,+-,+-,->\n", - "Ending Cycle49\n", - "7.168816798463818e-19|+,0,0,+>\n", - "2.8750368323794057e-18|+,0,0,->\n", - "3.280141187541793e-17|+,0,+-,+>\n", - "3.65552407049185e-17|+,+-,0,+>\n", - "2.7026179268253135e-18|+,+-,+-,+>\n", - "-4.537111241768314e-18|+,+-,+-,->\n", - "-1.8813287049467923e-14|-,0,0,+>\n", - "-2.4196532082254864e-16|-,0,0,->\n", - "4.1058735101942145e-16|-,0,+-,->\n", - "2.3319435237834157e-17|-,+-,0,->\n", - "1.2156034625725279e-14|-,+-,+-,+>\n", - "2.5596214122223684e-19|-,+-,+-,->\n", - "Ending Cycle50\n", - "9.9195527509876e-20|+,0,0,+>\n", - "2.0236549375248686e-18|+,0,0,->\n", - "2.3487876810909933e-17|+,0,+-,+>\n", - "3.6798990752994054e-17|+,+-,0,+>\n", - "3.4135991429258833e-18|+,+-,+-,+>\n", - "-5.3313066413739254e-18|+,+-,+-,->\n", - "-1.8807731324157397e-14|-,0,0,+>\n", - "-2.6614819430176035e-16|-,0,0,->\n", - "4.372280079351848e-16|-,0,+-,->\n", - "2.1001253220575572e-17|-,+-,0,->\n", - "1.1123554936668151e-14|-,+-,+-,+>\n", - "1.9039943169117997e-19|-,+-,+-,->\n", - "Ending Cycle51\n", - "4.7267570735043e-19|+,0,0,+>\n", - "1.0501991126688723e-18|+,0,0,->\n", - "2.4462354381404003e-17|+,0,+-,+>\n", - "3.5375002644659114e-17|+,+-,0,+>\n", - "4.451235602175327e-18|+,+-,+-,+>\n", - "-4.478914496836692e-18|+,+-,+-,->\n", - "-1.8681655409485564e-14|-,0,0,+>\n", - "-2.9651395588719837e-16|-,0,0,->\n", - "4.689365476258249e-16|-,0,+-,->\n", - "1.736468630636364e-17|-,+-,0,->\n", - "1.0221607405248892e-14|-,+-,+-,+>\n", - "4.513122958750375e-19|-,+-,+-,->\n", - "Ending Cycle52\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 20%|█▉ | 55/278 [00:03<00:13, 16.79it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.741013388213816e-20|+,0,0,+>\n", - "5.596954404901373e-19|+,0,0,->\n", - "3.78447760598441e-17|+,0,+-,+>\n", - "3.5849266348530825e-17|+,+-,0,+>\n", - "5.022473624128595e-18|+,+-,+-,+>\n", - "-5.301548811739977e-18|+,+-,+-,->\n", - "-1.8535536351853854e-14|-,0,0,+>\n", - "-3.3628386784944123e-16|-,0,0,->\n", - "5.084221179065016e-16|-,0,+-,->\n", - "1.2544030016148244e-17|-,+-,0,->\n", - "9.38435363058447e-15|-,+-,+-,+>\n", - 
"6.102483128530569e-19|-,+-,+-,->\n", - "Ending Cycle53\n", - "-5.191143592555827e-19|+,0,0,+>\n", - "-3.644775893718033e-19|+,0,0,->\n", - "6.222896210974408e-17|+,0,+-,+>\n", - "3.66481268917217e-17|+,+-,0,+>\n", - "5.2289309781781655e-18|+,+-,+-,+>\n", - "-5.617648260374712e-18|+,+-,+-,->\n", - "-1.8538145282361334e-14|-,0,0,+>\n", - "-3.9045680717877344e-16|-,0,0,->\n", - "5.538000748035149e-16|-,0,+-,->\n", - "5.912818691314217e-18|-,+-,0,->\n", - "8.519917973274464e-15|-,+-,+-,+>\n", - "3.520659148102213e-19|-,+-,+-,->\n", - "Ending Cycle54\n", - "-1.3196167490146929e-18|+,0,0,+>\n", - "-3.476648827227526e-18|+,0,0,->\n", - "5.781418572602809e-17|+,0,+-,+>\n", - "3.959817622482957e-17|+,+-,0,+>\n", - "5.997670456823783e-18|+,+-,+-,+>\n", - "-6.686900634720075e-18|+,+-,+-,->\n", - "-1.840642260540659e-14|-,0,0,+>\n", - "-4.522946648046182e-16|-,0,0,->\n", - "6.115725500232339e-16|-,0,+-,->\n", - "-3.7263493435432126e-18|-,+-,0,->\n", - "7.878136015753107e-15|-,+-,+-,+>\n", - "1.3395428352040266e-19|-,+-,+-,->\n", - "Ending Cycle55\n", - "-3.659802068914248e-20|+,0,0,+>\n", - "-3.366898393601305e-18|+,0,0,->\n", - "6.045330079252532e-17|+,0,+-,+>\n", - "3.784633233156562e-17|+,+-,0,+>\n", - "3.2046737434591634e-18|+,+-,+-,+>\n", - "-6.046195277585267e-18|+,+-,+-,->\n", - "-1.837688921609439e-14|-,0,0,+>\n", - "-5.422008505359729e-16|-,0,0,->\n", - "6.768717002634867e-16|-,0,+-,->\n", - "-1.5017389805190162e-17|-,+-,0,->\n", - "7.224343186730355e-15|-,+-,+-,+>\n", - "4.90105647382177e-20|-,+-,+-,->\n", - "Ending Cycle56\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 21%|██ | 59/278 [00:03<00:13, 16.43it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-2.777484315013314e-19|+,0,0,+>\n", - "1.206328047103738e-18|+,0,0,->\n", - "8.96425737322246e-17|+,0,+-,+>\n", - "3.897350904940725e-17|+,+-,0,+>\n", - "3.014801730391572e-18|+,+-,+-,+>\n", - "-5.02034453776571e-18|+,+-,+-,->\n", - "-1.82212699994291e-14|-,0,0,+>\n", - "-6.529262990774357e-16|-,0,0,->\n", - "7.606374118671215e-16|-,0,+-,->\n", - "-6.673759139381623e-18|-,+-,0,->\n", - "6.6023631297289084e-15|-,+-,+-,+>\n", - "2.4934978960119793e-19|-,+-,+-,->\n", - "Ending Cycle57\n", - "-1.4561501668065256e-18|+,0,0,+>\n", - "3.0382463629351186e-18|+,0,0,->\n", - "6.68274600078718e-17|+,0,+-,+>\n", - "4.077368009061377e-17|+,+-,0,+>\n", - "-6.8329859620356665e-18|+,+-,+-,+>\n", - "-4.714058852222451e-18|+,+-,+-,->\n", - "-1.812365458779538e-14|-,0,0,+>\n", - "-7.981598655930871e-16|-,0,0,->\n", - "8.637106241045341e-16|-,0,+-,->\n", - "-9.062444780632156e-19|-,+-,0,->\n", - "5.992920615856571e-15|-,+-,+-,+>\n", - "1.95616103388901e-19|-,+-,+-,->\n", - "Ending Cycle58\n", - "5.435825446596742e-19|+,0,0,+>\n", - "-1.9092875360355377e-18|+,0,0,->\n", - "6.693797476709585e-17|+,0,+-,+>\n", - "3.8986429349624017e-17|+,+-,0,+>\n", - "-2.1938520304208168e-18|+,+-,+-,+>\n", - "-8.732191889806018e-18|+,+-,+-,->\n", - "-1.8004303561586046e-14|-,0,0,+>\n", - "-9.692615810205531e-16|-,0,0,->\n", - "1.0082211361811285e-15|-,0,+-,->\n", - "-2.7280201245741024e-17|-,+-,0,->\n", - "5.532258084275812e-15|-,+-,+-,+>\n", - "1.0906581403964465e-19|-,+-,+-,->\n", - "Ending Cycle59\n", - "5.08937398355874e-19|+,0,0,+>\n", - "-8.273120580246176e-18|+,0,0,->\n", - "6.580800303832165e-17|+,0,+-,+>\n", - "3.869705727115075e-17|+,+-,0,+>\n", - "-1.7239887423719956e-18|+,+-,+-,+>\n", - "-1.0980641335900896e-17|+,+-,+-,->\n", - "-1.804264390256576e-14|-,0,0,+>\n", - 
"-1.2211482286550898e-15|-,0,0,->\n", - "1.1666823431816295e-15|-,0,+-,->\n", - "-3.396617696021022e-17|-,+-,0,->\n", - "5.0069948207222365e-15|-,+-,+-,+>\n", - "5.27496167344739e-19|-,+-,+-,->\n", - "Ending Cycle60\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 23%|██▎ | 63/278 [00:03<00:12, 16.92it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "7.781719361137172e-19|+,0,0,+>\n", - "-9.083895115511108e-18|+,0,0,->\n", - "6.675200355331426e-17|+,0,+-,+>\n", - "3.7691908978493676e-17|+,+-,0,+>\n", - "-2.642874761723795e-18|+,+-,+-,+>\n", - "-1.0932380392368371e-17|+,+-,+-,->\n", - "-1.8027439128562827e-14|-,0,0,+>\n", - "-1.5366722644834243e-15|-,0,0,->\n", - "1.3725161910337071e-15|-,0,+-,->\n", - "-5.447359519691953e-17|-,+-,0,->\n", - "4.638052825713329e-15|-,+-,+-,+>\n", - "2.412123270493686e-19|-,+-,+-,->\n", - "Ending Cycle61\n", - "1.6157795249637012e-18|+,0,0,+>\n", - "-1.0343495221245968e-17|+,0,0,->\n", - "6.736538171796673e-17|+,0,+-,+>\n", - "3.7763983131271783e-17|+,+-,0,+>\n", - "-4.583735680067732e-18|+,+-,+-,+>\n", - "-1.0587448527880546e-17|+,+-,+-,->\n", - "-1.783542223856624e-14|-,0,0,+>\n", - "-2.5409112504601764e-15|-,0,0,->\n", - "1.5814357812629554e-15|-,0,+-,->\n", - "-5.582935970915332e-17|-,+-,0,->\n", - "4.260243078986745e-15|-,+-,+-,+>\n", - "8.880494518735492e-20|-,+-,+-,->\n", - "Ending Cycle62\n", - "1.771699863554869e-18|+,0,0,+>\n", - "-7.960958988851071e-18|+,0,0,->\n", - "8.720331285517325e-17|+,0,+-,+>\n", - "3.7031818501486893e-17|+,+-,0,+>\n", - "-8.040619631481984e-18|+,+-,+-,+>\n", - "-1.2356765675295288e-17|+,+-,+-,->\n", - "-1.7763511845705607e-14|-,0,0,+>\n", - "-3.3058770815398298e-15|-,0,0,->\n", - "1.6063473181558773e-15|-,0,+-,->\n", - "-5.823336949277647e-17|-,+-,0,->\n", - "3.878771593417404e-15|-,+-,+-,+>\n", - "Ending Cycle63\n", - "6.895018719946631e-19|+,0,0,+>\n", - "-1.325279732304012e-17|+,0,0,->\n", - "6.972073010817407e-17|+,0,+-,+>\n", - "3.9357465673864165e-17|+,+-,0,+>\n", - "-5.8632303565382876e-18|+,+-,+-,+>\n", - "-1.2317723199910198e-17|+,+-,+-,->\n", - "-1.7627042195140174e-14|-,0,0,+>\n", - "-4.326590663636422e-15|-,0,0,->\n", - "1.6332590350151554e-15|-,0,+-,->\n", - "-5.926841622671824e-17|-,+-,0,->\n", - "3.5866692334524495e-15|-,+-,+-,+>\n", - "5.123484904035619e-19|-,+-,+-,->\n", - "Ending Cycle64\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 24%|██▍ | 67/278 [00:04<00:12, 16.31it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.2596035540517752e-18|+,0,0,+>\n", - "-2.073154759180454e-17|+,0,0,->\n", - "5.875436542551418e-17|+,0,+-,+>\n", - "3.8585871946914963e-17|+,+-,0,+>\n", - "-7.105380454108938e-18|+,+-,+-,+>\n", - "-1.2151712006565246e-17|+,+-,+-,->\n", - "-1.745396640954557e-14|-,0,0,+>\n", - "-4.347290887881831e-15|-,0,0,->\n", - "1.575912414333668e-15|-,0,+-,->\n", - "-5.0782171464289147e-17|-,+-,0,->\n", - "3.287573659164845e-15|-,+-,+-,+>\n", - "2.525144821304678e-19|-,+-,+-,->\n", - "Ending Cycle65\n", - "2.2498943357597337e-18|+,0,0,+>\n", - "-2.6096530595700748e-17|+,0,0,->\n", - "6.892819607311188e-17|+,0,+-,+>\n", - "3.8248080970081033e-17|+,+-,0,+>\n", - "-6.7921271542582134e-18|+,+-,+-,+>\n", - "-1.3212795059818244e-17|+,+-,+-,->\n", - "-1.7270344081048677e-14|-,0,0,+>\n", - "-4.368354886648795e-15|-,0,0,->\n", - "1.526318191230909e-15|-,0,+-,->\n", - "-4.4922090350020614e-17|-,+-,0,->\n", - 
"2.9558342083217132e-15|-,+-,+-,+>\n", - "6.263750927255275e-19|-,+-,+-,->\n", - "Ending Cycle66\n", - "1.365341834729813e-18|+,0,0,+>\n", - "-2.6895757746460263e-17|+,0,0,->\n", - "8.305633370362138e-17|+,0,+-,+>\n", - "4.126920306860098e-17|+,+-,0,+>\n", - "-6.717526299957722e-18|+,+-,+-,+>\n", - "-1.2861182430826275e-17|+,+-,+-,->\n", - "-1.722929223005965e-14|-,0,0,+>\n", - "-4.3925004689473395e-15|-,0,0,->\n", - "1.4700890316140336e-15|-,0,+-,->\n", - "-4.022798188427307e-17|-,+-,0,->\n", - "2.753710385715708e-15|-,+-,+-,+>\n", - "8.791458615100813e-19|-,+-,+-,->\n", - "Ending Cycle67\n", - "1.6957437075612383e-18|+,0,0,+>\n", - "-2.7380128796763717e-17|+,0,0,->\n", - "9.080388724362568e-17|+,0,+-,+>\n", - "4.0396316025160176e-17|+,+-,0,+>\n", - "-8.054499770041126e-18|+,+-,+-,+>\n", - "-1.5040258427636098e-17|+,+-,+-,->\n", - "-1.699266933388482e-14|-,0,0,+>\n", - "-4.419180338076507e-15|-,0,0,->\n", - "1.4162426878988372e-15|-,0,+-,->\n", - "-3.5805427443033464e-17|-,+-,0,->\n", - "2.646737960696375e-15|-,+-,+-,+>\n", - "1.0336253029623794e-18|-,+-,+-,->\n", - "Ending Cycle68\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 26%|██▌ | 71/278 [00:04<00:12, 16.38it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.6583435921078377e-18|+,0,0,+>\n", - "-3.134300595256373e-17|+,0,0,->\n", - "1.2811518899555946e-16|+,0,+-,+>\n", - "4.312035359435569e-17|+,+-,0,+>\n", - "-6.477192884830543e-18|+,+-,+-,+>\n", - "-1.540211527057772e-17|+,+-,+-,->\n", - "-1.7022911531924033e-14|-,0,0,+>\n", - "-4.4474729473970816e-15|-,0,0,->\n", - "1.3642198875012035e-15|-,0,+-,->\n", - "-3.186059862056195e-17|-,+-,0,->\n", - "2.439919592984429e-15|-,+-,+-,+>\n", - "1.4598132395563198e-18|-,+-,+-,->\n", - "Ending Cycle69\n", - "2.7214559459114775e-19|+,0,0,+>\n", - "-3.404973445242821e-17|+,0,0,->\n", - "1.3188454470424096e-16|+,0,+-,+>\n", - "4.82254740109127e-17|+,+-,0,+>\n", - "-8.950585828631963e-18|+,+-,+-,+>\n", - "-1.7667517739691272e-17|+,+-,+-,->\n", - "-1.6994360733584358e-14|-,0,0,+>\n", - "-4.4810059073794385e-15|-,0,0,->\n", - "1.3144005762623606e-15|-,0,+-,->\n", - "-2.9039957072701936e-17|-,+-,0,->\n", - "2.277339047609141e-15|-,+-,+-,+>\n", - "1.0465348023302298e-18|-,+-,+-,->\n", - "Ending Cycle70\n", - "1.449745921651754e-18|+,0,0,+>\n", - "-3.2834535266396225e-17|+,0,0,->\n", - "1.4540983360924726e-16|+,0,+-,+>\n", - "4.920491201506843e-17|+,+-,0,+>\n", - "-9.803348417154551e-18|+,+-,+-,+>\n", - "-1.6724556185599485e-17|+,+-,+-,->\n", - "-1.689336530810666e-14|-,0,0,+>\n", - "-4.519095774527898e-15|-,0,0,->\n", - "1.2668695445150587e-15|-,0,+-,->\n", - "-2.7118105687014953e-17|-,+-,0,->\n", - "2.1124020736441696e-15|-,+-,+-,+>\n", - "1.191869957117167e-18|-,+-,+-,->\n", - "Ending Cycle71\n", - "9.504876331485953e-19|+,0,0,+>\n", - "-3.6023579824483805e-17|+,0,0,->\n", - "1.3040640412038064e-16|+,0,+-,+>\n", - "5.249450985309729e-17|+,+-,0,+>\n", - "-1.1557438243604699e-17|+,+-,+-,+>\n", - "-7.920139328459777e-18|+,+-,+-,->\n", - "-1.6738931315213643e-14|-,0,0,+>\n", - "-4.567243194833639e-15|-,0,0,->\n", - "1.2213718362687455e-15|-,0,+-,->\n", - "-2.6479690662776695e-17|-,+-,0,->\n", - "1.8916333110701736e-15|-,+-,+-,+>\n", - "1.0746350938529315e-18|-,+-,+-,->\n", - "Ending Cycle72\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 27%|██▋ | 75/278 [00:04<00:12, 16.33it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": 
[ - "2.638191080294048e-18|+,0,0,+>\n", - "-3.6249645413049986e-17|+,0,0,->\n", - "1.3688247315627453e-16|+,0,+-,+>\n", - "4.9410501008507115e-17|+,+-,0,+>\n", - "-1.5032917548160358e-17|+,+-,+-,+>\n", - "-7.235400846898495e-18|+,+-,+-,->\n", - "-1.669915296046028e-14|-,0,0,+>\n", - "-4.6212187866130076e-15|-,0,0,->\n", - "1.177621429570375e-15|-,0,+-,->\n", - "-2.71551886617345e-17|-,+-,0,->\n", - "1.7026253916824832e-15|-,+-,+-,+>\n", - "6.923395967659092e-19|-,+-,+-,->\n", - "Ending Cycle73\n", - "1.6461403041649222e-18|+,0,0,+>\n", - "-3.7306035516991145e-17|+,0,0,->\n", - "1.5311760998997806e-16|+,0,+-,+>\n", - "5.178968841497996e-17|+,+-,0,+>\n", - "-1.6267178750579086e-17|+,+-,+-,+>\n", - "-1.0439134793176109e-17|+,+-,+-,->\n", - "-1.6588105066175824e-14|-,0,0,+>\n", - "-4.6883028420468286e-15|-,0,0,->\n", - "1.1362016562121995e-15|-,0,+-,->\n", - "-2.990281038212694e-17|-,+-,0,->\n", - "1.527690332120422e-15|-,+-,+-,+>\n", - "1.4761111587539718e-18|-,+-,+-,->\n", - "Ending Cycle74\n", - "6.955114777018042e-19|+,0,0,+>\n", - "-3.675455188705399e-17|+,0,0,->\n", - "1.432260335579637e-16|+,0,+-,+>\n", - "5.374017474440606e-17|+,+-,0,+>\n", - "-1.7433922852739332e-17|+,+-,+-,+>\n", - "-1.1371134286726725e-17|+,+-,+-,->\n", - "-1.6528809492627145e-14|-,0,0,+>\n", - "-4.768526435110813e-15|-,0,0,->\n", - "1.0970114399190463e-15|-,0,+-,->\n", - "-3.526354435798693e-17|-,+-,0,->\n", - "1.344520126095725e-15|-,+-,+-,+>\n", - "1.8204201524432806e-18|-,+-,+-,->\n", - "Ending Cycle75\n", - "2.5022407988586385e-18|+,0,0,+>\n", - "-3.976212970719246e-17|+,0,0,->\n", - "1.3669144035262065e-16|+,0,+-,+>\n", - "5.0159792954170234e-17|+,+-,0,+>\n", - "-1.9126432884122814e-17|+,+-,+-,+>\n", - "-1.1295785132835759e-17|+,+-,+-,->\n", - "-1.6446748704547802e-14|-,0,0,+>\n", - "-4.867388768681499e-15|-,0,0,->\n", - "1.0586989605412095e-15|-,0,+-,->\n", - "-4.411744352867358e-17|-,+-,0,->\n", - "1.295742212537166e-15|-,+-,+-,+>\n", - "2.04510038261172e-18|-,+-,+-,->\n", - "Ending Cycle76\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 28%|██▊ | 79/278 [00:04<00:11, 16.76it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.0486832391829346e-18|+,0,0,+>\n", - "-4.141427355830153e-17|+,0,0,->\n", - "1.221233186456223e-16|+,0,+-,+>\n", - "5.2008708616539555e-17|+,+-,0,+>\n", - "-1.9856026261207758e-17|+,+-,+-,+>\n", - "7.412833959844877e-18|+,+-,+-,->\n", - "-1.6459758647852144e-14|-,0,0,+>\n", - "-4.991538566108425e-15|-,0,0,->\n", - "1.0219590829892958e-15|-,0,+-,->\n", - "-2.204994551144153e-17|-,+-,0,->\n", - "1.1555884552919548e-15|-,+-,+-,+>\n", - "1.1391762483763308e-18|-,+-,+-,->\n", - "Ending Cycle77\n", - "2.6590076608636084e-18|+,0,0,+>\n", - "-5.042113268159273e-17|+,0,0,->\n", - "1.158058306017359e-16|+,0,+-,+>\n", - "4.991505955820232e-17|+,+-,0,+>\n", - "-2.1712905370721497e-17|+,+-,+-,+>\n", - "6.2626495478864e-19|+,+-,+-,->\n", - "-1.6437435684198326e-14|-,0,0,+>\n", - "-5.1420654524841675e-15|-,0,0,->\n", - "9.891494673348993e-16|-,0,+-,->\n", - "-3.966031119025201e-18|-,+-,0,->\n", - "1.0976380525189946e-15|-,+-,+-,+>\n", - "4.259556409142445e-19|-,+-,+-,->\n", - "Ending Cycle78\n", - "2.7887992734872585e-18|+,0,0,+>\n", - "-4.9226763781468984e-17|+,0,0,->\n", - "1.346405616137972e-16|+,0,+-,+>\n", - "4.883105932812098e-17|+,+-,0,+>\n", - "-2.1291644176268496e-17|+,+-,+-,+>\n", - "-5.8569652550765974e-18|+,+-,+-,->\n", - "-1.6233483355363146e-14|-,0,0,+>\n", - "-5.322198403798676e-15|-,0,0,->\n", - 
"9.614409526274655e-16|-,0,+-,->\n", - "-2.5773571253179607e-17|-,+-,0,->\n", - "1.093545778038803e-15|-,+-,+-,+>\n", - "8.806000619397663e-19|-,+-,+-,->\n", - "Ending Cycle79\n", - "1.9844207641913772e-18|+,0,0,+>\n", - "-5.489762287633407e-17|+,0,0,->\n", - "1.212039744960773e-16|+,0,+-,+>\n", - "5.287899057222117e-17|+,+-,0,+>\n", - "-2.547090711667651e-17|+,+-,+-,+>\n", - "-1.1830192706600467e-17|+,+-,+-,->\n", - "-1.627743561624528e-14|-,0,0,+>\n", - "-5.5360147729203454e-15|-,0,0,->\n", - "9.354317868333841e-16|-,0,+-,->\n", - "-1.978307312862309e-17|-,+-,0,->\n", - "9.798426727206914e-16|-,+-,+-,+>\n", - "1.6139311272729182e-18|-,+-,+-,->\n", - "Ending Cycle80\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 30%|██▉ | 83/278 [00:05<00:12, 15.96it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3.055019303631117e-18|+,0,0,+>\n", - "-5.910301885223953e-17|+,0,0,->\n", - "1.1072863455475395e-16|+,0,+-,+>\n", - "5.106124788512443e-17|+,+-,0,+>\n", - "-3.244169406873261e-17|+,+-,+-,+>\n", - "-6.229575176312176e-17|+,+-,+-,->\n", - "-1.6250986856418173e-14|-,0,0,+>\n", - "-5.819598171044803e-15|-,0,0,->\n", - "8.994577437713472e-16|-,0,+-,->\n", - "-1.799969226208296e-17|-,+-,0,->\n", - "9.241949978943764e-16|-,+-,+-,+>\n", - "1.5460598622214124e-18|-,+-,+-,->\n", - "Ending Cycle81\n", - "2.630862612967606e-18|+,0,0,+>\n", - "-5.939799995225138e-17|+,0,0,->\n", - "1.2427872497233697e-16|+,0,+-,+>\n", - "5.4875483882894445e-17|+,+-,0,+>\n", - "-3.5958165185094946e-17|+,+-,+-,+>\n", - "-6.86664908569309e-17|+,+-,+-,->\n", - "-1.619872065807122e-14|-,0,0,+>\n", - "-6.1651796840311366e-15|-,0,0,->\n", - "8.675059611507625e-16|-,0,+-,->\n", - "-3.101660396136567e-17|-,+-,0,->\n", - "7.899402471631637e-16|-,+-,+-,+>\n", - "1.7315363243870723e-18|-,+-,+-,->\n", - "Ending Cycle82\n", - "3.4105994950937297e-18|+,0,0,+>\n", - "-5.735194743228119e-17|+,0,0,->\n", - "1.3914956693965226e-16|+,0,+-,+>\n", - "5.194764666218804e-17|+,+-,0,+>\n", - "-3.877805030176186e-17|+,+-,+-,+>\n", - "-6.647425755475478e-17|+,+-,+-,->\n", - "-1.6087835955578058e-14|-,0,0,+>\n", - "-6.5924072422087244e-15|-,0,0,->\n", - "8.358130463306264e-16|-,0,+-,->\n", - "-1.898288422058818e-17|-,+-,0,->\n", - "6.55531697090333e-16|-,+-,+-,+>\n", - "1.2736247874893769e-18|-,+-,+-,->\n", - "Ending Cycle83\n", - "2.476861195891472e-18|+,0,0,+>\n", - "-5.658064751180948e-17|+,0,0,->\n", - "1.2105268879316975e-16|+,0,+-,+>\n", - "5.671571498851891e-17|+,+-,0,+>\n", - "-3.9456587421215324e-17|+,+-,+-,+>\n", - "-6.685058771938786e-17|+,+-,+-,->\n", - "-1.6160001578877114e-14|-,0,0,+>\n", - "-7.105601988385508e-15|-,0,0,->\n", - "8.083694790380006e-16|-,0,+-,->\n", - "-2.208072723495219e-17|-,+-,0,->\n", - "6.637834726449114e-16|-,+-,+-,+>\n", - "1.0547563574008074e-18|-,+-,+-,->\n", - "Ending Cycle84\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 31%|███▏ | 87/278 [00:05<00:11, 16.38it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "5.21834386083451e-18|+,0,0,+>\n", - "-6.235272006375888e-17|+,0,0,->\n", - "1.4569193375079227e-16|+,0,+-,+>\n", - "5.31355483935681e-17|+,+-,0,+>\n", - "-4.3304838519016455e-17|+,+-,+-,+>\n", - "-6.74375023879953e-17|+,+-,+-,->\n", - "-1.6220555829231175e-14|-,0,0,+>\n", - "-7.725699729751503e-15|-,0,0,->\n", - "7.823574403102883e-16|-,0,+-,->\n", - "-4.237989335337106e-17|-,+-,0,->\n", - 
"5.557345563949751e-16|-,+-,+-,+>\n", - "1.6630837247947338e-19|-,+-,+-,->\n", - "Ending Cycle85\n", - "3.0550088350584798e-18|+,0,0,+>\n", - "-6.462175840599126e-17|+,0,0,->\n", - "1.7543445317610096e-16|+,0,+-,+>\n", - "6.036051709418679e-17|+,+-,0,+>\n", - "-4.640251058400007e-17|+,+-,+-,+>\n", - "-6.968560401190278e-17|+,+-,+-,->\n", - "-1.6257931945006624e-14|-,0,0,+>\n", - "-8.468261237373194e-15|-,0,0,->\n", - "7.573406767611256e-16|-,0,+-,->\n", - "-4.660730021975775e-17|-,+-,0,->\n", - "5.404123894688028e-16|-,+-,+-,+>\n", - "5.180650080372171e-19|-,+-,+-,->\n", - "Ending Cycle86\n", - "2.161391707263535e-18|+,0,0,+>\n", - "-7.026228525504625e-17|+,0,0,->\n", - "1.9605732586659675e-16|+,0,+-,+>\n", - "6.385726193155283e-17|+,+-,0,+>\n", - "-4.5341263896578394e-17|+,+-,+-,+>\n", - "-7.072068667674012e-17|+,+-,+-,->\n", - "-1.6087130044738476e-14|-,0,0,+>\n", - "-9.369985995745192e-15|-,0,0,->\n", - "7.342097319358156e-16|-,0,+-,->\n", - "-3.4664480791785305e-17|-,+-,0,->\n", - "4.69096865507291e-16|-,+-,+-,+>\n", - "5.924230850259766e-19|-,+-,+-,->\n", - "Ending Cycle87\n", - "1.1807335267895324e-18|+,0,0,+>\n", - "-7.8845126514975e-17|+,0,0,->\n", - "1.8134236130468283e-16|+,0,+-,+>\n", - "6.830190338790025e-17|+,+-,0,+>\n", - "-4.373779135982608e-17|+,+-,+-,+>\n", - "-6.882250972037296e-17|+,+-,+-,->\n", - "-1.603457838774626e-14|-,0,0,+>\n", - "-1.0470682729997194e-14|-,0,0,->\n", - "7.169608696296869e-16|-,0,+-,->\n", - "-1.4431569567048553e-17|-,+-,0,->\n", - "4.891384322350856e-16|-,+-,+-,+>\n", - "1.647837266046624e-19|-,+-,+-,->\n", - "Ending Cycle88\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 33%|███▎ | 91/278 [00:05<00:11, 16.71it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3.822716354561474e-18|+,0,0,+>\n", - "-1.1480607103662812e-16|+,0,0,->\n", - "1.6165413008629312e-16|+,0,+-,+>\n", - "6.354696209026951e-17|+,+-,0,+>\n", - "-4.5832022276139403e-17|+,+-,+-,+>\n", - "-6.973561761643342e-17|+,+-,+-,->\n", - "-1.5993840823467122e-14|-,0,0,+>\n", - "-1.1782598435510063e-14|-,0,0,->\n", - "7.019693806618253e-16|-,0,+-,->\n", - "7.560051490267484e-18|-,+-,0,->\n", - "4.28305043084067e-16|-,+-,+-,+>\n", - "-3.9027570725151566e-20|-,+-,+-,->\n", - "Ending Cycle89\n", - "3.1633819416810567e-18|+,0,0,+>\n", - "-1.2868129504042855e-16|+,0,0,->\n", - "1.7000938317880585e-16|+,0,+-,+>\n", - "6.61112615028888e-17|+,+-,0,+>\n", - "-4.7614237310773796e-17|+,+-,+-,+>\n", - "-7.158626416068921e-17|+,+-,+-,->\n", - "-1.596723435297554e-14|-,0,0,+>\n", - "-1.1803121277797119e-14|-,0,0,->\n", - "6.76823618698915e-16|-,0,+-,->\n", - "2.8357904546916286e-17|-,+-,0,->\n", - "3.83309765169489e-16|-,+-,+-,+>\n", - "1.3300377687788786e-18|-,+-,+-,->\n", - "Ending Cycle90\n", - "2.8185246669654736e-18|+,0,0,+>\n", - "-1.311252021242492e-16|+,0,0,->\n", - "2.011855480854577e-16|+,0,+-,+>\n", - "6.614930311434974e-17|+,+-,0,+>\n", - "-4.9178830486992476e-17|+,+-,+-,+>\n", - "-6.019705882657894e-17|+,+-,+-,->\n", - "-1.5878757624255324e-14|-,0,0,+>\n", - "-1.1822903914051597e-14|-,0,0,->\n", - "6.551902438409202e-16|-,0,+-,->\n", - "4.104143159299728e-17|-,+-,0,->\n", - "4.2715875636616726e-16|-,+-,+-,+>\n", - "1.8302421645646532e-18|-,+-,+-,->\n", - "Ending Cycle91\n", - "1.9955335032786462e-18|+,0,0,+>\n", - "-1.3914891758265635e-16|+,0,0,->\n", - "1.9929845891602952e-16|+,0,+-,+>\n", - "6.821505065881219e-17|+,+-,0,+>\n", - "-5.553530366309209e-17|+,+-,+-,+>\n", - 
"-5.731238909641848e-17|+,+-,+-,->\n", - "-1.5807270981285963e-14|-,0,0,+>\n", - "-1.1845601126953334e-14|-,0,0,->\n", - "6.357824777080668e-16|-,0,+-,->\n", - "5.268898418156148e-17|-,+-,0,->\n", - "5.313434396971633e-16|-,+-,+-,+>\n", - "9.786492269767642e-19|-,+-,+-,->\n", - "Ending Cycle92\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 34%|███▍ | 95/278 [00:05<00:10, 16.87it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.670290036977363e-18|+,0,0,+>\n", - "-1.4641417098293901e-16|+,0,0,->\n", - "1.816049020424122e-16|+,0,+-,+>\n", - "7.046674769241166e-17|+,+-,0,+>\n", - "-5.2699283915910925e-17|+,+-,+-,+>\n", - "-6.586156239529753e-17|+,+-,+-,->\n", - "-1.5686036377617367e-14|-,0,0,+>\n", - "-1.1874778386365499e-14|-,0,0,->\n", - "6.150623367816796e-16|-,0,+-,->\n", - "6.371816283190773e-17|-,+-,0,->\n", - "5.08275969382936e-16|-,+-,+-,+>\n", - "1.2254800911759374e-18|-,+-,+-,->\n", - "Ending Cycle93\n", - "4.7460714850682264e-18|+,0,0,+>\n", - "-1.5401480628901914e-16|+,0,0,->\n", - "1.8848440321775826e-16|+,0,+-,+>\n", - "6.875896571807488e-17|+,+-,0,+>\n", - "-5.4760712468196594e-17|+,+-,+-,+>\n", - "-6.882027183139035e-17|+,+-,+-,->\n", - "-1.5700057020842492e-14|-,0,0,+>\n", - "-1.1907652139455507e-14|-,0,0,->\n", - "5.949970128600278e-16|-,0,+-,->\n", - "7.350452092318703e-17|-,+-,0,->\n", - "4.448653039413031e-16|-,+-,+-,+>\n", - "2.501445125369858e-18|-,+-,+-,->\n", - "Ending Cycle94\n", - "2.901420081271418e-18|+,0,0,+>\n", - "-1.5967563410253337e-16|+,0,0,->\n", - "2.087519909169601e-16|+,0,+-,+>\n", - "7.267851454430556e-17|+,+-,0,+>\n", - "-6.337766131106248e-17|+,+-,+-,+>\n", - "-7.434508313871075e-17|+,+-,+-,->\n", - "-1.5612978612445268e-14|-,0,0,+>\n", - "-1.194598425960473e-14|-,0,0,->\n", - "5.779084307879158e-16|-,0,+-,->\n", - "8.141942399765367e-17|-,+-,0,->\n", - "4.485403758302411e-16|-,+-,+-,+>\n", - "2.693995640298052e-18|-,+-,+-,->\n", - "Ending Cycle95\n", - "1.4604587166729207e-18|+,0,0,+>\n", - "-1.70673828968725e-16|+,0,0,->\n", - "2.092931953926649e-16|+,0,+-,+>\n", - "7.675868794575619e-17|+,+-,0,+>\n", - "-6.501583494358345e-17|+,+-,+-,+>\n", - "-7.557999225273485e-17|+,+-,+-,->\n", - "-1.545411937197284e-14|-,0,0,+>\n", - "-1.1991166562448557e-14|-,0,0,->\n", - "5.59496185030857e-16|-,0,+-,->\n", - "8.879617361921949e-17|-,+-,0,->\n", - "3.863133684192225e-16|-,+-,+-,+>\n", - "3.382508374042161e-18|-,+-,+-,->\n", - "Ending Cycle96\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~0.000000: 36%|███▌ | 99/278 [00:06<00:10, 16.85it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "4.1618983853128996e-18|+,0,0,+>\n", - "-1.784316660629273e-16|+,0,0,->\n", - "2.1342937430440133e-16|+,0,+-,+>\n", - "7.09662029600273e-17|+,+-,0,+>\n", - "-7.103710135881584e-17|+,+-,+-,+>\n", - "-8.537376778390893e-17|+,+-,+-,->\n", - "-1.550641450901405e-14|-,0,0,+>\n", - "-1.2046421532862245e-14|-,0,0,->\n", - "5.441651293174132e-16|-,0,+-,->\n", - "9.429531325457469e-17|-,+-,0,->\n", - "3.7897010830034524e-16|-,+-,+-,+>\n", - "3.3214659739225565e-18|-,+-,+-,->\n", - "Ending Cycle97\n", - "1.7282844991696177e-18|+,0,0,+>\n", - "-1.8698137205828336e-16|+,0,0,->\n", - "2.1959166064637735e-16|+,0,+-,+>\n", - "8.033695480501418e-17|+,+-,0,+>\n", - "-7.109400706637322e-17|+,+-,+-,+>\n", - "-9.140393869779016e-17|+,+-,+-,->\n", - "-1.553350802775259e-14|-,0,0,+>\n", - "-1.2110452550510043e-14|-,0,0,->\n", - 
"5.28726068190476e-16|-,0,+-,->\n", - "9.762503497133964e-17|-,+-,0,->\n", - "3.0461567104728404e-16|-,+-,+-,+>\n", - "1.988408096326188e-18|-,+-,+-,->\n", - "Ending Cycle98\n", - "-4.6669720638000776e-18|+,0,0,+>\n", - "-1.9255998578354593e-16|+,0,0,->\n", - "2.228836259723309e-16|+,0,+-,+>\n", - "9.553910727190011e-17|+,+-,0,+>\n", - "-7.383306550616552e-17|+,+-,+-,+>\n", - "-9.795939569543599e-17|+,+-,+-,->\n", - "-1.5508156336221494e-14|-,0,0,+>\n", - "-1.2186958681657217e-14|-,0,0,->\n", - "5.14588176702198e-16|-,0,+-,->\n", - "9.973027442545248e-17|-,+-,0,->\n", - "2.6817680569931625e-16|-,+-,+-,+>\n", - "1.898620534239597e-18|-,+-,+-,->\n", - "Ending Cycle99\n", - "2.0364007307348244e-18|+,0,0,+>\n", - "-1.946963849083606e-16|+,0,0,->\n", - "2.1377332995026652e-16|+,0,+-,+>\n", - "1.0670384728259887e-16|+,+-,0,+>\n", - "-7.948708156320308e-17|+,+-,+-,+>\n", - "-1.0420886095844277e-16|+,+-,+-,->\n", - "-1.556864733957311e-14|-,0,0,+>\n", - "-1.2271414198541597e-14|-,0,0,->\n", - "5.013712509339549e-16|-,0,+-,->\n", - "9.882523835531088e-17|-,+-,0,->\n", - "2.8630379854234666e-16|-,+-,+-,+>\n", - "1.3521363566621697e-18|-,+-,+-,->\n", - "Ending Cycle100\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 37%|███▋ | 103/278 [00:06<00:12, 14.52it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.5436208203333118e-18|+,0,0,+>\n", - "-2.0047697679538898e-16|+,0,0,->\n", - "1.9885434860246607e-16|+,0,+-,+>\n", - "1.1227796151016583e-16|+,+-,0,+>\n", - "-8.269796812575271e-17|+,+-,+-,+>\n", - "-1.1155319774045188e-16|+,+-,+-,->\n", - "-1.5627953575003637e-14|-,0,0,+>\n", - "-1.2370440477236405e-14|-,0,0,->\n", - "4.896011800622123e-16|-,0,+-,->\n", - "9.584731526904241e-17|-,+-,0,->\n", - "2.361400917731576e-16|-,+-,+-,+>\n", - "3.9852021963051426e-19|-,+-,+-,->\n", - "Ending Cycle101\n", - "-5.1064104301000624e-18|+,0,0,+>\n", - "-2.120438339122912e-16|+,0,0,->\n", - "1.8268278606140035e-16|+,0,+-,+>\n", - "1.3193166760550162e-16|+,+-,0,+>\n", - "-8.872885488218272e-17|+,+-,+-,+>\n", - "-4.434112041717082e-17|+,+-,+-,->\n", - "-1.5624277878850946e-14|-,0,0,+>\n", - "-1.248972031997266e-14|-,0,0,->\n", - "4.796563193999492e-16|-,0,+-,->\n", - "8.895792476584799e-17|-,+-,0,->\n", - "2.2931118219619725e-16|-,+-,+-,+>\n", - "2.189966573592525e-18|-,+-,+-,->\n", - "Ending Cycle102\n", - "2.9611092982040344e-18|+,0,0,+>\n", - "-2.1907509054815294e-16|+,0,0,->\n", - "1.8100748812582664e-16|+,0,+-,+>\n", - "1.3516097610567882e-16|+,+-,0,+>\n", - "-9.107780757796478e-17|+,+-,+-,+>\n", - "-5.677171908802406e-17|+,+-,+-,->\n", - "-1.5803607773104788e-14|-,0,0,+>\n", - "-1.2622637326526058e-14|-,0,0,->\n", - "4.638043327479576e-16|-,0,+-,->\n", - "7.891790989101045e-17|-,+-,0,->\n", - "1.7549300921551542e-16|-,+-,+-,+>\n", - "1.0919591693825835e-18|-,+-,+-,->\n", - "Ending Cycle103\n", - "-6.333668588177271e-18|+,0,0,+>\n", - "-2.284427956458797e-16|+,0,0,->\n", - "1.7422165221197677e-16|+,0,+-,+>\n", - "1.5929722029109483e-16|+,+-,0,+>\n", - "-9.06145836233975e-17|+,+-,+-,+>\n", - "-6.954508971013336e-17|+,+-,+-,->\n", - "-1.587285258895573e-14|-,0,0,+>\n", - "-1.2778843450116715e-14|-,0,0,->\n", - "4.402819651538589e-16|-,0,+-,->\n", - "1.1018224514969794e-16|-,+-,0,->\n", - "1.379921883880778e-16|-,+-,+-,+>\n", - "1.5124366790817459e-19|-,+-,+-,->\n", - "Ending Cycle104\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 38%|███▊ | 107/278 
[00:06<00:10, 15.74it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-6.693671040520098e-18|+,0,0,+>\n", - "-1.2144345996895593e-16|+,0,0,->\n", - "1.7441915936846494e-16|+,0,+-,+>\n", - "1.879422451601502e-16|+,+-,0,+>\n", - "-9.441780835780293e-17|+,+-,+-,+>\n", - "-8.543809283165066e-17|+,+-,+-,->\n", - "-1.5847598281910833e-14|-,0,0,+>\n", - "-1.2953398716870383e-14|-,0,0,->\n", - "4.153437808575564e-16|-,0,+-,->\n", - "1.3893189538442173e-16|-,+-,0,->\n", - "1.1111006857000956e-16|-,+-,+-,+>\n", - "-1.2456227084232106e-18|-,+-,+-,->\n", - "Ending Cycle105\n", - "1.8978786926913265e-18|+,0,0,+>\n", - "-1.2997696368638174e-16|+,0,0,->\n", - "1.7573908623870027e-16|+,0,+-,+>\n", - "2.0225733604052981e-16|+,+-,0,+>\n", - "-1.0218154892433309e-16|+,+-,+-,+>\n", - "-9.769714972137285e-17|+,+-,+-,->\n", - "-1.5790555781888188e-14|-,0,0,+>\n", - "-1.3155950726552078e-14|-,0,0,->\n", - "3.8496336075921136e-16|-,0,+-,->\n", - "1.6502085949543962e-16|-,+-,0,->\n", - "8.105597255799595e-17|-,+-,+-,+>\n", - "-2.190202975493606e-18|-,+-,+-,->\n", - "Ending Cycle106\n", - "3.8394237183240154e-18|+,0,0,+>\n", - "-1.3358022346133733e-16|+,0,0,->\n", - "1.8418970230372539e-16|+,0,+-,+>\n", - "2.0894613098546943e-16|+,+-,0,+>\n", - "-1.0507770935176837e-16|+,+-,+-,+>\n", - "-1.1208148257037418e-16|+,+-,+-,->\n", - "-1.593077028030164e-14|-,0,0,+>\n", - "-1.3391907417575844e-14|-,0,0,->\n", - "3.505034068734342e-16|-,0,+-,->\n", - "1.8460245036242465e-16|-,+-,0,->\n", - "1.1946144586514906e-16|-,+-,+-,+>\n", - "-6.595107861030921e-19|-,+-,+-,->\n", - "Ending Cycle107\n", - "4.561695978671001e-18|+,0,0,+>\n", - "-1.4621853865816597e-16|+,0,0,->\n", - "2.0177498517474468e-16|+,0,+-,+>\n", - "2.1118312797013136e-16|+,+-,0,+>\n", - "-1.0762898031579115e-16|+,+-,+-,+>\n", - "-1.2464876324004483e-16|+,+-,+-,->\n", - "-1.6058567465335018e-14|-,0,0,+>\n", - "-1.366305293229372e-14|-,0,0,->\n", - "3.179514447234462e-16|-,0,+-,->\n", - "1.9689775486270602e-16|-,+-,0,->\n", - "1.1198507119025142e-16|-,+-,+-,+>\n", - "-9.624178800145035e-19|-,+-,+-,->\n", - "Ending Cycle108\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 40%|███▉ | 111/278 [00:06<00:10, 16.46it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3.985873157324234e-18|+,0,0,+>\n", - "-1.543814403118779e-16|+,0,0,->\n", - "2.433448851188466e-16|+,0,+-,+>\n", - "2.2102174588816764e-16|+,+-,0,+>\n", - "-1.077406900373472e-16|+,+-,+-,+>\n", - "-1.37795635733483e-16|+,+-,+-,->\n", - "-1.6022830608072056e-14|-,0,0,+>\n", - "-1.3970460697663561e-14|-,0,0,->\n", - "2.8510313861518817e-16|-,0,+-,->\n", - "2.0184886876896347e-16|-,+-,0,->\n", - "1.1520600044002815e-16|-,+-,+-,+>\n", - "-1.7426548417664605e-18|-,+-,+-,->\n", - "Ending Cycle109\n", - "1.0593820181517904e-18|+,0,0,+>\n", - "-1.6716618718614038e-16|+,0,0,->\n", - "2.8460168007995344e-16|+,0,+-,+>\n", - "2.296379432923422e-16|+,+-,0,+>\n", - "-1.1044032643676353e-16|+,+-,+-,+>\n", - "-1.597016539222692e-16|+,+-,+-,->\n", - "-1.6129984916100767e-14|-,0,0,+>\n", - "-1.432709638652198e-14|-,0,0,->\n", - "2.373187336527404e-16|-,0,+-,->\n", - "1.992237028620267e-16|-,+-,0,->\n", - "5.794107549284404e-17|-,+-,+-,+>\n", - "-2.760252688219548e-19|-,+-,+-,->\n", - "Ending Cycle110\n", - "2.0201324717323624e-18|+,0,0,+>\n", - "-1.7415488250978098e-16|+,0,0,->\n", - "2.753627059649599e-16|+,0,+-,+>\n", - "2.2995932045339846e-16|+,+-,0,+>\n", - "-1.0944465234726622e-16|+,+-,+-,+>\n", - 
"-1.7516906717303438e-16|+,+-,+-,->\n", - "-1.6137482381269372e-14|-,0,0,+>\n", - "-1.471834795909794e-14|-,0,0,->\n", - "1.9167312642612838e-16|-,0,+-,->\n", - "2.345561403580004e-16|-,+-,0,->\n", - "1.2396415711596773e-16|-,+-,+-,+>\n", - "4.565036455037406e-19|-,+-,+-,->\n", - "Ending Cycle111\n", - "2.8491132712203982e-18|+,0,0,+>\n", - "-1.785091491773239e-16|+,0,0,->\n", - "2.598057196980776e-16|+,0,+-,+>\n", - "2.361107451406903e-16|+,+-,0,+>\n", - "-1.0942153988980044e-16|+,+-,+-,+>\n", - "-1.8627006924251084e-16|+,+-,+-,->\n", - "-1.631065572575006e-14|-,0,0,+>\n", - "-1.5139060019356647e-14|-,0,0,->\n", - "1.566384999087749e-16|-,0,+-,->\n", - "2.1498998940196671e-16|-,+-,0,->\n", - "1.566466886509068e-17|-,+-,+-,+>\n", - "6.761868569103759e-19|-,+-,+-,->\n", - "Ending Cycle112\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 41%|████▏ | 115/278 [00:07<00:09, 16.67it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "6.469698139103581e-18|+,0,0,+>\n", - "-1.813380948313692e-16|+,0,0,->\n", - "2.7000570432748573e-16|+,0,+-,+>\n", - "2.3400005650484383e-16|+,+-,0,+>\n", - "-1.0888632262032651e-16|+,+-,+-,+>\n", - "-2.0448559379546679e-16|+,+-,+-,->\n", - "-1.629611971550669e-14|-,0,0,+>\n", - "-1.564408040054869e-14|-,0,0,->\n", - "1.0847904504701877e-16|-,0,+-,->\n", - "2.35699468904434e-16|-,+-,0,->\n", - "2.1228319368192666e-17|-,+-,+-,+>\n", - "-6.338033534774022e-19|-,+-,+-,->\n", - "Ending Cycle113\n", - "3.5074420511471794e-18|+,0,0,+>\n", - "-1.8923162054673524e-16|+,0,0,->\n", - "3.0629667897778045e-16|+,0,+-,+>\n", - "2.431715405631657e-16|+,+-,0,+>\n", - "-1.1161026458951147e-16|+,+-,+-,+>\n", - "-2.2175029738456128e-16|+,+-,+-,->\n", - "-1.6385864067927112e-14|-,0,0,+>\n", - "-1.626216855522875e-14|-,0,0,->\n", - "4.164947039278588e-17|-,0,+-,->\n", - "2.402611612159869e-16|-,+-,0,->\n", - "5.194897870702883e-17|-,+-,+-,+>\n", - "-6.970928008398773e-20|-,+-,+-,->\n", - "Ending Cycle114\n", - "2.987724402608824e-18|+,0,0,+>\n", - "-1.5585233498587903e-16|+,0,0,->\n", - "3.180290379209523e-16|+,0,+-,+>\n", - "2.458127785562357e-16|+,+-,0,+>\n", - "-1.1158483648343464e-16|+,+-,+-,+>\n", - "-2.3684542688165086e-16|+,+-,+-,->\n", - "-1.6506579153787636e-14|-,0,0,+>\n", - "-1.696153113897997e-14|-,0,0,->\n", - "-2.7494927190043495e-17|-,0,+-,->\n", - "2.2188117145645415e-16|-,+-,0,->\n", - "1.8267794800375634e-17|-,+-,+-,+>\n", - "-2.195944986564791e-18|-,+-,+-,->\n", - "Ending Cycle115\n", - "2.9822100846836253e-18|+,0,0,+>\n", - "-1.621773969305905e-16|+,0,0,->\n", - "3.0504807498246604e-16|+,0,+-,+>\n", - "2.4933769484468226e-16|+,+-,0,+>\n", - "-1.1639129995518266e-16|+,+-,+-,+>\n", - "-2.593684566609835e-16|+,+-,+-,->\n", - "-1.6759873815298313e-14|-,0,0,+>\n", - "-1.7779474016411e-14|-,0,0,->\n", - "-1.0742373925216866e-16|-,0,+-,->\n", - "2.3511621952063087e-16|-,+-,0,->\n", - "1.054153494833813e-16|-,+-,+-,+>\n", - "-1.807462525168778e-18|-,+-,+-,->\n", - "Ending Cycle116\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 43%|████▎ | 119/278 [00:07<00:09, 16.83it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3.885661598761861e-18|+,0,0,+>\n", - "-1.7433931062065093e-16|+,0,0,->\n", - "2.939572950279794e-16|+,0,+-,+>\n", - "2.5252941234285357e-16|+,+-,0,+>\n", - "-1.183936888746866e-16|+,+-,+-,+>\n", - "-2.756697321159496e-16|+,+-,+-,->\n", - "-1.6987390883948885e-14|-,0,0,+>\n", - 
"-1.8738015200774688e-14|-,0,0,->\n", - "-1.963161346494472e-16|-,0,+-,->\n", - "2.260078354643135e-16|-,+-,0,->\n", - "1.009780520020494e-16|-,+-,+-,+>\n", - "4.5488058849507585e-19|-,+-,+-,->\n", - "Ending Cycle117\n", - "4.241228095751977e-18|+,0,0,+>\n", - "-1.8080689394591458e-16|+,0,0,->\n", - "3.119817410647658e-16|+,0,+-,+>\n", - "2.5784705319019233e-16|+,+-,0,+>\n", - "-1.228677010331245e-16|+,+-,+-,+>\n", - "-3.0623909496591227e-16|+,+-,+-,->\n", - "-1.7159150427323865e-14|-,0,0,+>\n", - "-1.986622004863092e-14|-,0,0,->\n", - "-3.0510623307522236e-16|-,0,+-,->\n", - "2.363743689422227e-16|-,+-,0,->\n", - "1.1965292098844402e-16|-,+-,+-,+>\n", - "5.733750650396419e-19|-,+-,+-,->\n", - "Ending Cycle118\n", - "5.931949957952405e-18|+,0,0,+>\n", - "-1.8917364223136045e-16|+,0,0,->\n", - "3.2147926660854555e-16|+,0,+-,+>\n", - "2.597988465345945e-16|+,+-,0,+>\n", - "-1.2028609910808787e-16|+,+-,+-,+>\n", - "-3.2737108608237286e-16|+,+-,+-,->\n", - "-1.7248815939283882e-14|-,0,0,+>\n", - "-2.111761999462464e-14|-,0,0,->\n", - "-4.3412765077485423e-16|-,0,+-,->\n", - "2.640819893290804e-16|-,+-,0,->\n", - "1.605317233707489e-16|-,+-,+-,+>\n", - "-1.0563489625252486e-19|-,+-,+-,->\n", - "Ending Cycle119\n", - "3.2038201470748513e-18|+,0,0,+>\n", - "-2.0354140143813102e-16|+,0,0,->\n", - "3.35747895910534e-16|+,0,+-,+>\n", - "2.6802420101415923e-16|+,+-,0,+>\n", - "-1.2163396099026604e-16|+,+-,+-,+>\n", - "-3.507034240980524e-16|+,+-,+-,->\n", - "-1.7423918788629764e-14|-,0,0,+>\n", - "-2.2373266136958726e-14|-,0,0,->\n", - "-5.641425349579691e-16|-,0,+-,->\n", - "2.666422732093096e-16|-,+-,0,->\n", - "1.6005327554634868e-16|-,+-,+-,+>\n", - "-8.672416138806431e-19|-,+-,+-,->\n", - "Ending Cycle120\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 44%|████▍ | 123/278 [00:07<00:09, 16.73it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "4.727142625234115e-18|+,0,0,+>\n", - "-2.135136904396148e-16|+,0,0,->\n", - "3.7538882972470523e-16|+,0,+-,+>\n", - "2.688872960536203e-16|+,+-,0,+>\n", - "-1.28852639479721e-16|+,+-,+-,+>\n", - "-3.7264246174601294e-16|+,+-,+-,->\n", - "-1.7654070770609824e-14|-,0,0,+>\n", - "-2.3767237037434686e-14|-,0,0,->\n", - "-7.272171040315671e-16|-,0,+-,->\n", - "2.6832348195646296e-16|-,+-,0,->\n", - "1.0049724673179665e-16|-,+-,+-,+>\n", - "-3.3251810406029345e-18|-,+-,+-,->\n", - "Ending Cycle121\n", - "6.045278338573156e-18|+,0,0,+>\n", - "-2.2948729969518957e-16|+,0,0,->\n", - "-3.820302587030513e-18|+,0,+-,+>\n", - "-1.1073317481132237e-16|+,+-,0,+>\n", - "-1.2410840396646363e-16|+,+-,+-,+>\n", - "-3.9301169930811e-16|+,+-,+-,->\n", - "-1.7745191482880088e-14|-,0,0,+>\n", - "-2.4796023190413067e-14|-,0,0,->\n", - "-8.672319468522002e-16|-,0,+-,->\n", - "2.9768338619786114e-16|-,+-,0,->\n", - "1.0357545963586676e-16|-,+-,+-,+>\n", - "-2.164225054751835e-18|-,+-,+-,->\n", - "Ending Cycle122\n", - "-2.36173192738951e-19|+,0,0,+>\n", - "-2.3974888082367324e-16|+,0,0,->\n", - "-1.1061989412302977e-17|+,0,+-,+>\n", - "-1.282347099187321e-16|+,+-,0,+>\n", - "-1.2944294124007717e-16|+,+-,+-,+>\n", - "-4.1153783363028303e-16|+,+-,+-,->\n", - "-1.7967968252979152e-14|-,0,0,+>\n", - "-2.569932052363868e-14|-,0,0,->\n", - "-9.956087245444664e-16|-,0,+-,->\n", - "2.7858026257818414e-16|-,+-,0,->\n", - "8.008629171561371e-17|-,+-,+-,+>\n", - "1.3717249046998338e-18|-,+-,+-,->\n", - "Ending Cycle123\n", - "2.5632273577629434e-19|+,0,0,+>\n", - "-2.5220897386688364e-16|+,0,0,->\n", 
- "1.5505594880629108e-17|+,0,+-,+>\n", - "-1.128590578816409e-16|+,+-,0,+>\n", - "-1.324916288198233e-16|+,+-,+-,+>\n", - "-4.18543493805894e-16|+,+-,+-,->\n", - "-1.830649033327897e-14|-,0,0,+>\n", - "-2.6388104218090558e-14|-,0,0,->\n", - "-1.140518822183721e-15|-,0,+-,->\n", - "3.0847110495096407e-16|-,+-,0,->\n", - "3.1738787000080953e-18|-,+-,+-,+>\n", - "1.3178710332155872e-18|-,+-,+-,->\n", - "Ending Cycle124\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 46%|████▌ | 127/278 [00:07<00:08, 16.83it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3.197844656165938e-18|+,0,0,+>\n", - "-2.4884601220923955e-16|+,0,0,->\n", - "2.596937340459811e-17|+,0,+-,+>\n", - "-1.1954227287967935e-16|+,+-,0,+>\n", - "-1.3988232475662043e-16|+,+-,+-,+>\n", - "-4.1623117228278513e-16|+,+-,+-,->\n", - "-1.870380991048751e-14|-,0,0,+>\n", - "-2.682642249181149e-14|-,0,0,->\n", - "-1.3295035532288114e-15|-,0,+-,->\n", - "3.38176072049423e-16|-,+-,0,->\n", - "-1.0734348570021671e-16|-,+-,+-,+>\n", - "-6.377158865519194e-19|-,+-,+-,->\n", - "Ending Cycle125\n", - "-5.62631017753885e-18|+,0,0,+>\n", - "-2.1263670767909264e-16|+,0,0,->\n", - "2.101099747919329e-17|+,0,+-,+>\n", - "-9.421308250190406e-17|+,+-,0,+>\n", - "-1.4524719454368554e-16|+,+-,+-,+>\n", - "-4.327875349828023e-16|+,+-,+-,->\n", - "-1.9025351697072726e-14|-,0,0,+>\n", - "-2.6829238731355093e-14|-,0,0,->\n", - "-1.551255738240356e-15|-,0,+-,->\n", - "3.694322861548864e-16|-,+-,0,->\n", - "-1.7818800685237037e-16|-,+-,+-,+>\n", - "1.9649023566476534e-18|-,+-,+-,->\n", - "Ending Cycle126\n", - "2.1461589044285605e-18|+,0,0,+>\n", - "-2.2001745235468683e-16|+,0,0,->\n", - "7.262800110346339e-18|+,0,+-,+>\n", - "-1.0111495105450945e-16|+,+-,0,+>\n", - "-1.4399005297303683e-16|+,+-,+-,+>\n", - "-4.367385142726679e-16|+,+-,+-,->\n", - "-1.9156711620627955e-14|-,0,0,+>\n", - "-2.6147510045158448e-14|-,0,0,->\n", - "-1.810504531735656e-15|-,0,+-,->\n", - "3.920822765368772e-16|-,+-,0,->\n", - "-1.9081159561545103e-16|-,+-,+-,+>\n", - "1.258800085402667e-18|-,+-,+-,->\n", - "Ending Cycle127\n", - "1.2103589127392917e-18|+,0,0,+>\n", - "-2.202123278047886e-16|+,0,0,->\n", - "2.0784895707007436e-17|+,0,+-,+>\n", - "-9.202247501646798e-17|+,+-,0,+>\n", - "-1.5552447617087855e-16|+,+-,+-,+>\n", - "-4.396561107614466e-16|+,+-,+-,->\n", - "-1.94481333477569e-14|-,0,0,+>\n", - "-2.4599034389320642e-14|-,0,0,->\n", - "-2.098923957397284e-15|-,0,+-,->\n", - "3.9992054314219347e-16|-,+-,0,->\n", - "-1.2147229111393736e-16|-,+-,+-,+>\n", - "-9.739114554958172e-19|-,+-,+-,->\n", - "Ending Cycle128\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 47%|████▋ | 131/278 [00:08<00:08, 16.96it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.189905597397261e-19|+,0,0,+>\n", - "-2.28730865391649e-16|+,0,0,->\n", - "1.777046923121975e-17|+,0,+-,+>\n", - "-8.997402707902963e-17|+,+-,0,+>\n", - "-1.5834658436509232e-16|+,+-,+-,+>\n", - "-4.314623137352164e-16|+,+-,+-,->\n", - "-1.970928531351349e-14|-,0,0,+>\n", - "-2.452064651406267e-14|-,0,0,->\n", - "-1.970420498601856e-15|-,0,+-,->\n", - "4.3505348573298347e-16|-,+-,0,->\n", - "-1.253300175002825e-16|-,+-,+-,+>\n", - "-1.3578398565981223e-18|-,+-,+-,->\n", - "Ending Cycle129\n", - "3.759205744005203e-18|+,0,0,+>\n", - "-2.40543422290939e-16|+,0,0,->\n", - "5.899822756391962e-18|+,0,+-,+>\n", - 
"-1.0088693424962334e-16|+,+-,0,+>\n", - "-1.6310217443664024e-16|+,+-,+-,+>\n", - "-4.2260197096822706e-16|+,+-,+-,->\n", - "-1.996849714965346e-14|-,0,0,+>\n", - "-2.4403195803448746e-14|-,0,0,->\n", - "-1.8844109804654386e-15|-,0,+-,->\n", - "4.1445387924085736e-16|-,+-,0,->\n", - "-1.0775172305898711e-16|-,+-,+-,+>\n", - "1.7912073668329573e-19|-,+-,+-,->\n", - "Ending Cycle130\n", - "2.525634787463452e-18|+,0,0,+>\n", - "-2.759565674584476e-16|+,0,0,->\n", - "-5.150220012275075e-18|+,0,+-,+>\n", - "-9.284711786255199e-17|+,+-,0,+>\n", - "-1.7035908599909615e-16|+,+-,+-,+>\n", - "-4.142068337356518e-16|+,+-,+-,->\n", - "-2.0157317712991917e-14|-,0,0,+>\n", - "-2.4249607372262353e-14|-,0,0,->\n", - "-1.8012282955891207e-15|-,0,+-,->\n", - "4.0128917566523147e-16|-,+-,0,->\n", - "-1.0091740788778026e-16|-,+-,+-,+>\n", - "-1.9023937170051916e-18|-,+-,+-,->\n", - "Ending Cycle131\n", - "6.286132011761353e-18|+,0,0,+>\n", - "-2.905468119743789e-16|+,0,0,->\n", - "1.898842351171473e-17|+,0,+-,+>\n", - "-9.64880271400074e-17|+,+-,0,+>\n", - "-1.7398581616321848e-16|+,+-,+-,+>\n", - "-4.0397571354647067e-16|+,+-,+-,->\n", - "-2.031422532167576e-14|-,0,0,+>\n", - "-2.404415220587011e-14|-,0,0,->\n", - "-1.7288067760444942e-15|-,0,+-,->\n", - "3.9623826667052915e-16|-,+-,0,->\n", - "-1.0572508625548314e-16|-,+-,+-,+>\n", - "2.132339935569369e-18|-,+-,+-,->\n", - "Ending Cycle132\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 49%|████▊ | 135/278 [00:08<00:08, 16.34it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.069346001433292e-17|+,0,0,+>\n", - "-2.898171411197029e-16|+,0,0,->\n", - "1.1629140338246212e-17|+,0,+-,+>\n", - "-1.0996221045450212e-16|+,+-,0,+>\n", - "-1.7426487748719531e-16|+,+-,+-,+>\n", - "-4.00131189628725e-16|+,+-,+-,->\n", - "-2.055046069617829e-14|-,0,0,+>\n", - "-2.3782472483119327e-14|-,0,0,->\n", - "-1.6598205051966107e-15|-,0,+-,->\n", - "4.025184864625167e-16|-,+-,0,->\n", - "-6.026063062250821e-17|-,+-,+-,+>\n", - "3.247529377288662e-18|-,+-,+-,->\n", - "Ending Cycle133\n", - "7.051582034376554e-18|+,0,0,+>\n", - "-2.893666240563239e-16|+,0,0,->\n", - "1.9022159498949343e-17|+,0,+-,+>\n", - "-1.1770634013996962e-16|+,+-,0,+>\n", - "-1.7688713109190557e-16|+,+-,+-,+>\n", - "-3.815388669390788e-16|+,+-,+-,->\n", - "-2.0980385215561552e-14|-,0,0,+>\n", - "-2.3454031208789312e-14|-,0,0,->\n", - "-1.5907157562144933e-15|-,0,+-,->\n", - "4.1856763871128724e-16|-,+-,0,->\n", - "-9.56641215560613e-17|-,+-,+-,+>\n", - "4.1736677582421585e-18|-,+-,+-,->\n", - "Ending Cycle134\n", - "4.2891302540285085e-18|+,0,0,+>\n", - "-2.7669606894556504e-16|+,0,0,->\n", - "-4.458933151858453e-17|+,0,+-,+>\n", - "-1.704902827329759e-16|+,+-,0,+>\n", - "-1.89176078097962e-16|+,+-,+-,+>\n", - "-3.5867419766236187e-16|+,+-,+-,->\n", - "-2.1245914190183246e-14|-,0,0,+>\n", - "-2.302199568843611e-14|-,0,0,->\n", - "-1.52775769820092e-15|-,0,+-,->\n", - "3.944849337112088e-16|-,+-,0,->\n", - "-1.120267644928033e-16|-,+-,+-,+>\n", - "1.9875741355212064e-18|-,+-,+-,->\n", - "Ending Cycle135\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 50%|█████ | 139/278 [00:08<00:08, 15.55it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "8.895585617062374e-19|+,0,0,+>\n", - "-2.6147759389512186e-16|+,0,0,->\n", - "-3.520066942516538e-17|+,0,+-,+>\n", - "-1.5358800713530035e-16|+,+-,0,+>\n", - 
"-1.8996054307933192e-16|+,+-,+-,+>\n", - "-3.408234731018482e-16|+,+-,+-,->\n", - "-2.153418330562094e-14|-,0,0,+>\n", - "-2.2485159382413944e-14|-,0,0,->\n", - "-1.4694115254807788e-15|-,0,+-,->\n", - "3.8849319839588847e-16|-,+-,0,->\n", - "-1.8478111931944453e-16|-,+-,+-,+>\n", - "-3.7039330104718616e-20|-,+-,+-,->\n", - "Ending Cycle136\n", - "3.0943466467420523e-18|+,0,0,+>\n", - "-2.5666484970712304e-16|+,0,0,->\n", - "-2.0592173825021272e-17|+,0,+-,+>\n", - "-1.3459433335078717e-16|+,+-,0,+>\n", - "-2.0021137518597132e-16|+,+-,+-,+>\n", - "-3.1028810460035765e-16|+,+-,+-,->\n", - "-2.1843209543418624e-14|-,0,0,+>\n", - "-2.1828559793852297e-14|-,0,0,->\n", - "-1.4190717000600543e-15|-,0,+-,->\n", - "4.0937847448419613e-16|-,+-,0,->\n", - "-2.927904080933159e-16|-,+-,+-,+>\n", - "1.2843169168387729e-18|-,+-,+-,->\n", - "Ending Cycle137\n", - "-6.145011778133965e-20|+,0,0,+>\n", - "-2.598546935006089e-16|+,0,0,->\n", - "-1.7437483895611696e-17|+,0,+-,+>\n", - "-1.1396491742547092e-16|+,+-,0,+>\n", - "-1.9562840939814057e-16|+,+-,+-,+>\n", - "-2.8519146696938927e-16|+,+-,+-,->\n", - "-2.2174685562852575e-14|-,0,0,+>\n", - "-2.100294179667711e-14|-,0,0,->\n", - "-1.3690243707800467e-15|-,0,+-,->\n", - "4.0187975338709266e-16|-,+-,0,->\n", - "-3.0559781950458404e-16|-,+-,+-,+>\n", - "-9.826772009321072e-19|-,+-,+-,->\n", - "Ending Cycle138\n", - "-5.3354551803404995e-18|+,0,0,+>\n", - "-2.5728965291876045e-16|+,0,0,->\n", - "-7.24496459332464e-18|+,0,+-,+>\n", - "-9.123671388897918e-17|+,+-,0,+>\n", - "-1.9522347501092763e-16|+,+-,+-,+>\n", - "-2.5204283174487038e-16|+,+-,+-,->\n", - "-2.250307658269818e-14|-,0,0,+>\n", - "-1.9943209303394722e-14|-,0,0,->\n", - "-1.30708730466507e-15|-,0,+-,->\n", - "4.2894536380908947e-16|-,+-,0,->\n", - "-4.067716284558205e-16|-,+-,+-,+>\n", - "-3.898695517748561e-18|-,+-,+-,->\n", - "Ending Cycle139\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 51%|█████▏ | 143/278 [00:08<00:09, 14.96it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.0214120661705661e-16|+,0,0,+>\n", - "-2.5552487559187723e-16|+,0,0,->\n", - "1.7155101187592978e-17|+,0,+-,+>\n", - "-5.524510917902457e-17|+,+-,0,+>\n", - "-1.9211956702336391e-16|+,+-,+-,+>\n", - "-2.1851746932079168e-16|+,+-,+-,->\n", - "-2.267251508349129e-14|-,0,0,+>\n", - "-1.8603138752395214e-14|-,0,0,->\n", - "-1.2582119675359516e-15|-,0,+-,->\n", - "4.504397272318704e-16|-,+-,0,->\n", - "-4.0106327664326284e-16|-,+-,+-,+>\n", - "-6.89556818496942e-18|-,+-,+-,->\n", - "Ending Cycle140\n", - "1.0453016385086791e-16|+,0,0,+>\n", - "-2.625822937085884e-16|+,0,0,->\n", - "3.375345857212407e-17|+,0,+-,+>\n", - "-4.788287737633533e-17|+,+-,0,+>\n", - "-1.9534510159449398e-16|+,+-,+-,+>\n", - "-1.0259273367455849e-16|+,+-,+-,->\n", - "-2.2967246606868665e-14|-,0,0,+>\n", - "-1.8623977818872302e-14|-,0,0,->\n", - "-1.1696349356260739e-15|-,0,+-,->\n", - "4.744891597650192e-16|-,+-,0,->\n", - "-3.696649597202595e-16|-,+-,+-,+>\n", - "-7.108189109678296e-18|-,+-,+-,->\n", - "Ending Cycle141\n", - "1.0701574455425208e-16|+,0,0,+>\n", - "-2.522602579189855e-16|+,0,0,->\n", - "4.1477132366093286e-17|+,0,+-,+>\n", - "-3.9589297925941597e-17|+,+-,0,+>\n", - "-2.0155637323258401e-16|+,+-,+-,+>\n", - "-7.480774883382404e-17|+,+-,+-,->\n", - "-2.3391597422683593e-14|-,0,0,+>\n", - "-1.8646275539622694e-14|-,0,0,->\n", - "-1.1060044807231776e-15|-,0,+-,->\n", - "4.832102786579655e-16|-,+-,0,->\n", - 
"-4.1625051149490336e-16|-,+-,+-,+>\n", - "-7.722951118531747e-18|-,+-,+-,->\n", - "Ending Cycle142\n", - "1.0967290251184055e-16|+,0,0,+>\n", - "-2.439072758020462e-16|+,0,0,->\n", - "7.102099327977879e-17|+,0,+-,+>\n", - "-3.715054153376197e-17|+,+-,0,+>\n", - "-1.9856608527959615e-16|+,+-,+-,+>\n", - "1.3028474138446087e-17|+,+-,+-,->\n", - "-2.3616187002048927e-14|-,0,0,+>\n", - "-1.8653562693326285e-14|-,0,0,->\n", - "-1.0463533036375618e-15|-,0,+-,->\n", - "4.3445376341053077e-16|-,+-,0,->\n", - "-3.835117093980849e-16|-,+-,+-,+>\n", - "-6.309317872547076e-18|-,+-,+-,->\n", - "Ending Cycle143\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 53%|█████▎ | 147/278 [00:09<00:08, 14.82it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "9.602218140083663e-17|+,0,0,+>\n", - "-2.3176879855472925e-16|+,0,0,->\n", - "6.382617672865121e-17|+,0,+-,+>\n", - "-4.211630065397383e-18|+,+-,0,+>\n", - "-2.1189022571337723e-16|+,+-,+-,+>\n", - "5.175968671342705e-17|+,+-,+-,->\n", - "-2.388977232922316e-14|-,0,0,+>\n", - "-1.8673479912681613e-14|-,0,0,->\n", - "-9.889668400012385e-16|-,0,+-,->\n", - "4.442254008629536e-16|-,+-,0,->\n", - "-2.8168226737212097e-16|-,+-,+-,+>\n", - "-5.3227646053237895e-18|-,+-,+-,->\n", - "Ending Cycle144\n", - "1.0760481050767631e-16|+,0,0,+>\n", - "-2.226124438135469e-16|+,0,0,->\n", - "5.271970630861015e-17|+,0,+-,+>\n", - "2.9581072728983822e-18|+,+-,0,+>\n", - "-2.1183969884789947e-16|+,+-,+-,+>\n", - "2.7200634914712404e-16|+,+-,+-,->\n", - "-2.433130447880978e-14|-,0,0,+>\n", - "-1.8692943034080928e-14|-,0,0,->\n", - "-9.30210884114406e-16|-,0,+-,->\n", - "4.561956360432673e-16|-,+-,0,->\n", - "-2.224278995985274e-16|-,+-,+-,+>\n", - "-7.749202794244107e-18|-,+-,+-,->\n", - "Ending Cycle145\n", - "1.1003935372152884e-16|+,0,0,+>\n", - "-2.423431489795988e-16|+,0,0,->\n", - "6.385605566217096e-17|+,0,+-,+>\n", - "4.81638137741761e-18|+,+-,0,+>\n", - "-2.0883602113227517e-16|+,+-,+-,+>\n", - "3.792861857196973e-16|+,+-,+-,->\n", - "-2.472644389541342e-14|-,0,0,+>\n", - "-1.8708544251578534e-14|-,0,0,->\n", - "-8.513072981498883e-16|-,0,+-,->\n", - "4.661380001633547e-16|-,+-,0,->\n", - "-2.247129426590784e-16|-,+-,+-,+>\n", - "-8.036234046013573e-18|-,+-,+-,->\n", - "Ending Cycle146\n", - "1.0050144428784939e-16|+,0,0,+>\n", - "-2.232572667499991e-16|+,0,0,->\n", - "6.16258651743587e-17|+,0,+-,+>\n", - "3.462385471451228e-17|+,+-,0,+>\n", - "-2.1503549158051806e-16|+,+-,+-,+>\n", - "3.989748681515946e-16|+,+-,+-,->\n", - "-2.502782516333418e-14|-,0,0,+>\n", - "-1.8724906886571415e-14|-,0,0,->\n", - "-7.8647683389655595e-16|-,0,+-,->\n", - "4.763607116007838e-16|-,+-,0,->\n", - "-2.226975284177386e-16|-,+-,+-,+>\n", - "-9.921711482936244e-18|-,+-,+-,->\n", - "Ending Cycle147\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 54%|█████▍ | 151/278 [00:09<00:08, 14.77it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.0691610339801997e-16|+,0,0,+>\n", - "-2.1394448038250282e-16|+,0,0,->\n", - "8.121589921445321e-17|+,0,+-,+>\n", - "2.838655100946614e-17|+,+-,0,+>\n", - "-2.1791736676224708e-16|+,+-,+-,+>\n", - "3.062763672386453e-16|+,+-,+-,->\n", - "-2.542563147871281e-14|-,0,0,+>\n", - "-1.8739087251934438e-14|-,0,0,->\n", - "-7.187991566082279e-16|-,0,+-,->\n", - "4.907099478097232e-16|-,+-,0,->\n", - "-2.419888021200433e-16|-,+-,+-,+>\n", - "-9.773396773203327e-18|-,+-,+-,->\n", - 
"Ending Cycle148\n", - "1.0248746379227304e-16|+,0,0,+>\n", - "-2.168057107742488e-16|+,0,0,->\n", - "9.672208901328375e-17|+,0,+-,+>\n", - "5.1842400063987596e-17|+,+-,0,+>\n", - "-2.2159595100364543e-16|+,+-,+-,+>\n", - "1.9203389627626283e-16|+,+-,+-,->\n", - "-2.571640614733574e-14|-,0,0,+>\n", - "-1.8752212975065604e-14|-,0,0,->\n", - "-6.480608253214205e-16|-,0,+-,->\n", - "4.996954541722057e-16|-,+-,0,->\n", - "-2.284379371398903e-16|-,+-,+-,+>\n", - "-9.539034037911752e-18|-,+-,+-,->\n", - "Ending Cycle149\n", - "1.0380431104410626e-16|+,0,0,+>\n", - "-2.1881329109327175e-16|+,0,0,->\n", - "8.35448863177163e-17|+,0,+-,+>\n", - "6.494679795961766e-17|+,+-,0,+>\n", - "-2.203489568830813e-16|+,+-,+-,+>\n", - "1.6510044597342532e-16|+,+-,+-,->\n", - "-2.60036860953354e-14|-,0,0,+>\n", - "-1.8761347053701224e-14|-,0,0,->\n", - "-5.793157524556357e-16|-,0,+-,->\n", - "5.141098178432263e-16|-,+-,0,->\n", - "-1.8464176396614568e-16|-,+-,+-,+>\n", - "-1.148514356234549e-17|-,+-,+-,->\n", - "Ending Cycle150\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\r", - "n=100, tau=0.01, energy~-1.729014: 55%|█████▌ | 153/278 [00:09<00:08, 14.61it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "9.603589327173535e-17|+,0,0,+>\n", - "-2.1624270366598015e-16|+,0,0,->\n", - "1.1509725656415265e-16|+,0,+-,+>\n", - "1.0189914777791125e-16|+,+-,0,+>\n", - "-2.2426502298979176e-16|+,+-,+-,+>\n", - "1.9935214793061187e-17|+,+-,+-,->\n", - "-2.63336848547666e-14|-,0,0,+>\n", - "-1.8768781714629486e-14|-,0,0,->\n", - "-5.150774910246263e-16|-,0,+-,->\n", - "5.267513851614678e-16|-,+-,0,->\n", - "-2.4085882555018103e-16|-,+-,+-,+>\n", - "-1.2941966394533557e-17|-,+-,+-,->\n", - "Ending Cycle151\n", - "1.0415007989145163e-16|+,0,0,+>\n", - "-2.1492499247070374e-16|+,0,0,->\n", - "1.4379122756791904e-16|+,0,+-,+>\n", - "1.2748118470295791e-16|+,+-,0,+>\n", - "-2.2479063862975135e-16|+,+-,+-,+>\n", - "1.4796828435383282e-16|+,+-,+-,->\n", - "-2.6748132109419783e-14|-,0,0,+>\n", - "-1.877322693127546e-14|-,0,0,->\n", - "-4.512775835479675e-16|-,0,+-,->\n", - "5.460652476478011e-16|-,+-,0,->\n", - "-3.409350528489379e-16|-,+-,+-,+>\n", - "-1.6588625314309727e-17|-,+-,+-,->\n", - "Ending Cycle152\n", - "1.1091490831743282e-16|+,0,0,+>\n", - "-1.962042531961883e-16|+,0,0,->\n", - "1.6391917681232062e-16|+,0,+-,+>\n", - "1.3906929323001488e-16|+,+-,0,+>\n", - "-2.2072759136367543e-16|+,+-,+-,+>\n", - "2.425941942936033e-16|+,+-,+-,->\n", - "-2.7179425314983987e-14|-,0,0,+>\n", - "-1.8775943256801013e-14|-,0,0,->\n", - "-3.849198181805841e-16|-,0,+-,->\n", - "5.613689642375317e-16|-,+-,0,->\n", - "-3.085586499319454e-16|-,+-,+-,+>\n", - "-1.4434500249895913e-17|-,+-,+-,->\n", - "Ending Cycle153\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 56%|█████▋ | 157/278 [00:09<00:08, 14.47it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "9.821406254468573e-17|+,0,0,+>\n", - "-2.0245631361729786e-16|+,0,0,->\n", - "2.0118365570618836e-16|+,0,+-,+>\n", - "1.8129347694578636e-16|+,+-,0,+>\n", - "-2.1846039304772189e-16|+,+-,+-,+>\n", - "2.7274578780578735e-16|+,+-,+-,->\n", - "-2.752412544130661e-14|-,0,0,+>\n", - "-1.877390667654601e-14|-,0,0,->\n", - "-3.2038919598390263e-16|-,0,+-,->\n", - "5.755191952281856e-16|-,+-,0,->\n", - "-2.655021288478138e-16|-,+-,+-,+>\n", - "-1.4110545118648008e-17|-,+-,+-,->\n", - "Ending Cycle154\n", - "1.0813873944263215e-16|+,0,0,+>\n", - 
"-2.007639188511898e-16|+,0,0,->\n", - "2.0820106434561367e-16|+,0,+-,+>\n", - "2.0367752249075676e-16|+,+-,0,+>\n", - "-2.250868483494045e-16|+,+-,+-,+>\n", - "1.4412213842415193e-16|+,+-,+-,->\n", - "-2.7710557730449087e-14|-,0,0,+>\n", - "-1.8765423804690367e-14|-,0,0,->\n", - "-2.5864381307903387e-16|-,0,+-,->\n", - "5.989873577870487e-16|-,+-,0,->\n", - "-2.8699769100711426e-16|-,+-,+-,+>\n", - "-1.3401925474211958e-17|-,+-,+-,->\n", - "Ending Cycle155\n", - "1.0871050602136667e-16|+,0,0,+>\n", - "-2.0519216060247175e-16|+,0,0,->\n", - "2.759938461711752e-16|+,0,+-,+>\n", - "2.2641892818041515e-16|+,+-,0,+>\n", - "-2.289523664034627e-16|+,+-,+-,+>\n", - "2.9471836705213377e-16|+,+-,+-,->\n", - "-2.8179600148903547e-14|-,0,0,+>\n", - "-1.8751833431013587e-14|-,0,0,->\n", - "-2.0106808874382409e-16|-,0,+-,->\n", - "6.18117848697401e-16|-,+-,0,->\n", - "-3.794403133751524e-16|-,+-,+-,+>\n", - "-1.533095870105347e-17|-,+-,+-,->\n", - "Ending Cycle156\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\r", - "n=100, tau=0.01, energy~-1.729014: 57%|█████▋ | 159/278 [00:09<00:08, 14.25it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "9.563287855881621e-17|+,0,0,+>\n", - "-2.203254567686229e-16|+,0,0,->\n", - "3.0825225602356814e-16|+,0,+-,+>\n", - "2.70668662806855e-16|+,+-,0,+>\n", - "-2.2807443430511197e-16|+,+-,+-,+>\n", - "2.226296364096781e-16|+,+-,+-,->\n", - "-2.8391377398563185e-14|-,0,0,+>\n", - "-1.8733064765539075e-14|-,0,0,->\n", - "-1.3846024273917032e-16|-,0,+-,->\n", - "6.390101603189387e-16|-,+-,0,->\n", - "-3.716360231312618e-16|-,+-,+-,+>\n", - "-1.4146116535872745e-17|-,+-,+-,->\n", - "Ending Cycle157\n", - "1.0908320835278901e-16|+,0,0,+>\n", - "-2.3045194623293553e-16|+,0,0,->\n", - "3.417053991972674e-16|+,0,+-,+>\n", - "2.9377806802776753e-16|+,+-,0,+>\n", - "-2.2930139227089117e-16|+,+-,+-,+>\n", - "2.8428474196423904e-16|+,+-,+-,->\n", - "-2.860670008644685e-14|-,0,0,+>\n", - "-1.870618722533627e-14|-,0,0,->\n", - "-7.602930341044778e-17|-,0,+-,->\n", - "6.646462914283499e-16|-,+-,0,->\n", - "-3.6430607086141034e-16|-,+-,+-,+>\n", - "-1.4330839276414382e-17|-,+-,+-,->\n", - "Ending Cycle158\n", - "1.104112665526134e-16|+,0,0,+>\n", - "-2.461100235842677e-16|+,0,0,->\n", - "3.8851116415752837e-16|+,0,+-,+>\n", - "3.205964198245221e-16|+,+-,0,+>\n", - "-2.3502500399389263e-16|+,+-,+-,+>\n", - "4.3459318661544203e-16|+,+-,+-,->\n", - "-2.8882184812854044e-14|-,0,0,+>\n", - "-1.8670528537245093e-14|-,0,0,->\n", - "-1.4609211841070543e-17|-,0,+-,->\n", - "6.881509589468558e-16|-,+-,0,->\n", - "-3.9358024489992514e-16|-,+-,+-,+>\n", - "-1.2857477445787453e-17|-,+-,+-,->\n", - "Ending Cycle159\n", - "9.537262792434214e-17|+,0,0,+>\n", - "-2.6413691257349743e-16|+,0,0,->\n", - "4.0388167126552593e-16|+,0,+-,+>\n", - "3.7207579840493665e-16|+,+-,0,+>\n", - "-2.3920849992615283e-16|+,+-,+-,+>\n", - "5.157309805758317e-16|+,+-,+-,->\n", - "-2.925489117847207e-14|-,0,0,+>\n", - "-1.862902250105544e-14|-,0,0,->\n", - "5.3654961084509e-17|-,0,+-,->\n", - "7.157501961267995e-16|-,+-,0,->\n", - "-4.70327684054575e-16|-,+-,+-,+>\n", - "-9.509185158569493e-18|-,+-,+-,->\n", - "Ending Cycle160\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 59%|█████▊ | 163/278 [00:10<00:07, 15.30it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.1009374202004546e-16|+,0,0,+>\n", - "-2.833507171454651e-16|+,0,0,->\n", - 
"4.3780774493103543e-16|+,0,+-,+>\n", - "4.0024810985360023e-16|+,+-,0,+>\n", - "-2.433387696126213e-16|+,+-,+-,+>\n", - "5.114792675314403e-16|+,+-,+-,->\n", - "-2.962240666597465e-14|-,0,0,+>\n", - "-1.857854218556557e-14|-,0,0,->\n", - "1.1655626328737715e-16|-,0,+-,->\n", - "7.489986770984944e-16|-,+-,0,->\n", - "-4.808041682598711e-16|-,+-,+-,+>\n", - "-1.5249777764376525e-17|-,+-,+-,->\n", - "Ending Cycle161\n", - "1.0340600488409064e-16|+,0,0,+>\n", - "-2.9495065793605344e-16|+,0,0,->\n", - "4.409208618536205e-16|+,0,+-,+>\n", - "3.905544686746201e-16|+,+-,0,+>\n", - "-2.5262409022448857e-16|+,+-,+-,+>\n", - "5.560629941842617e-16|+,+-,+-,->\n", - "-2.987348287445957e-14|-,0,0,+>\n", - "-1.8516721644000837e-14|-,0,0,->\n", - "1.8564015848148685e-16|-,0,+-,->\n", - "7.78657408580704e-16|-,+-,0,->\n", - "-4.44799401515467e-16|-,+-,+-,+>\n", - "-7.57203493787478e-18|-,+-,+-,->\n", - "Ending Cycle162\n", - "9.44009303812123e-17|+,0,0,+>\n", - "-3.021534323528713e-16|+,0,0,->\n", - "4.4108207958341585e-16|+,0,+-,+>\n", - "4.0606051703367557e-16|+,+-,0,+>\n", - "-2.461170039086663e-16|+,+-,+-,+>\n", - "5.315296275818099e-16|+,+-,+-,->\n", - "-3.031668706140265e-14|-,0,0,+>\n", - "-1.8439199257380228e-14|-,0,0,->\n", - "2.5674624314621347e-16|-,0,+-,->\n", - "8.09964486256033e-16|-,+-,0,->\n", - "-4.3605191115549767e-16|-,+-,+-,+>\n", - "-1.1654460923922675e-17|-,+-,+-,->\n", - "Ending Cycle163\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 60%|██████ | 167/278 [00:10<00:07, 14.75it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "9.54941669706043e-17|+,0,0,+>\n", - "-3.180013633003316e-16|+,0,0,->\n", - "4.247469711332414e-16|+,0,+-,+>\n", - "4.1657510151675307e-16|+,+-,0,+>\n", - "-2.489624701818621e-16|+,+-,+-,+>\n", - "5.982270680245301e-16|+,+-,+-,->\n", - "-3.062779256239712e-14|-,0,0,+>\n", - "-1.8347617447289935e-14|-,0,0,->\n", - "3.2122265730757694e-16|-,0,+-,->\n", - "8.449759598052114e-16|-,+-,0,->\n", - "-4.509248469554871e-16|-,+-,+-,+>\n", - "-1.2927865848649458e-17|-,+-,+-,->\n", - "Ending Cycle164\n", - "1.0798154205065848e-16|+,0,0,+>\n", - "-3.4749974798210704e-16|+,0,0,->\n", - "4.766275720176855e-16|+,0,+-,+>\n", - "4.508794115956364e-16|+,+-,0,+>\n", - "-2.5515750559439263e-16|+,+-,+-,+>\n", - "5.956066765941981e-16|+,+-,+-,->\n", - "-3.101821896483094e-14|-,0,0,+>\n", - "-1.8240217430998187e-14|-,0,0,->\n", - "3.855792068920477e-16|-,0,+-,->\n", - "8.847911329022176e-16|-,+-,0,->\n", - "-4.269710833959843e-16|-,+-,+-,+>\n", - "-1.2418606582763339e-17|-,+-,+-,->\n", - "Ending Cycle165\n", - "8.882137446286602e-17|+,0,0,+>\n", - "-3.8033003964104635e-16|+,0,0,->\n", - "4.628171453467572e-16|+,0,+-,+>\n", - "4.746949738743813e-16|+,+-,0,+>\n", - "-2.648672785499737e-16|+,+-,+-,+>\n", - "7.690129327911497e-16|+,+-,+-,->\n", - "-3.1215343260579776e-14|-,0,0,+>\n", - "-1.811204878510528e-14|-,0,0,->\n", - "4.569356451463174e-16|-,0,+-,->\n", - "9.334667497534387e-16|-,+-,0,->\n", - "-3.6698025829620407e-16|-,+-,+-,+>\n", - "-9.620854614839219e-18|-,+-,+-,->\n", - "Ending Cycle166\n", - "8.627864094053271e-17|+,0,0,+>\n", - "-4.0983081116962577e-16|+,0,0,->\n", - "4.811790254627603e-16|+,0,+-,+>\n", - "5.302685019057327e-16|+,+-,0,+>\n", - "-2.511796466496047e-16|+,+-,+-,+>\n", - "7.135070633352409e-16|+,+-,+-,->\n", - "-3.1366453529470285e-14|-,0,0,+>\n", - "-1.7958485946862318e-14|-,0,0,->\n", - "5.330218968144438e-16|-,0,+-,->\n", - "9.864760743466817e-16|-,+-,0,->\n", - 
"-3.388278339660551e-16|-,+-,+-,+>\n", - "-1.0979624509724192e-17|-,+-,+-,->\n", - "Ending Cycle167\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 62%|██████▏ | 171/278 [00:10<00:07, 14.49it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.0080322371230124e-16|+,0,0,+>\n", - "-4.385238362359978e-16|+,0,0,->\n", - "4.96386848259619e-16|+,0,+-,+>\n", - "5.276075743116437e-16|+,+-,0,+>\n", - "-2.5784259141775757e-16|+,+-,+-,+>\n", - "7.944888257778458e-16|+,+-,+-,->\n", - "-3.169562869038206e-14|-,0,0,+>\n", - "-1.778164483158144e-14|-,0,0,->\n", - "6.050887198092381e-16|-,0,+-,->\n", - "1.0469419509755058e-15|-,+-,0,->\n", - "-3.2610552274580827e-16|-,+-,+-,+>\n", - "-1.2172038624239006e-17|-,+-,+-,->\n", - "Ending Cycle168\n", - "9.48627767116694e-17|+,0,0,+>\n", - "-4.893026314411418e-16|+,0,0,->\n", - "5.180573194314309e-16|+,0,+-,+>\n", - "5.621580707802422e-16|+,+-,0,+>\n", - "-2.5990095943129323e-16|+,+-,+-,+>\n", - "8.351666864993743e-16|+,+-,+-,->\n", - "-3.1969362666674646e-14|-,0,0,+>\n", - "-1.757338375161435e-14|-,0,0,->\n", - "6.808864183059527e-16|-,0,+-,->\n", - "1.1108225496371906e-15|-,+-,0,->\n", - "-4.053087159454969e-16|-,+-,+-,+>\n", - "-1.244075322699531e-17|-,+-,+-,->\n", - "Ending Cycle169\n", - "9.073398235595335e-17|+,0,0,+>\n", - "-5.333621760828407e-16|+,0,0,->\n", - "5.174212657126826e-16|+,0,+-,+>\n", - "5.618017425813403e-16|+,+-,0,+>\n", - "-2.52368464188632e-16|+,+-,+-,+>\n", - "9.755089522504026e-16|+,+-,+-,->\n", - "-3.2345601399483144e-14|-,0,0,+>\n", - "-1.7339141782526884e-14|-,0,0,->\n", - "7.602029223083081e-16|-,0,+-,->\n", - "1.1839220139586272e-15|-,+-,0,->\n", - "-4.350815947567669e-16|-,+-,+-,+>\n", - "-8.866321918520866e-18|-,+-,+-,->\n", - "Ending Cycle170\n", - "9.444761692875859e-17|+,0,0,+>\n", - "-5.804998509172058e-16|+,0,0,->\n", - "5.880100585430067e-16|+,0,+-,+>\n", - "6.204993264387557e-16|+,+-,0,+>\n", - "-2.5160669712764455e-16|+,+-,+-,+>\n", - "1.0333988590993472e-15|+,+-,+-,->\n", - "-3.259529568496245e-14|-,0,0,+>\n", - "-1.7077478343228907e-14|-,0,0,->\n", - "8.447542326235354e-16|-,0,+-,->\n", - "1.2590971114704709e-15|-,+-,0,->\n", - "-3.8331281171122695e-16|-,+-,+-,+>\n", - "-7.891605482769959e-18|-,+-,+-,->\n", - "Ending Cycle171\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 63%|██████▎ | 175/278 [00:10<00:06, 15.09it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "8.810996726035924e-17|+,0,0,+>\n", - "-6.028220927519679e-16|+,0,0,->\n", - "6.063174910623635e-16|+,0,+-,+>\n", - "6.739914792920157e-16|+,+-,0,+>\n", - "-2.4114769267625106e-16|+,+-,+-,+>\n", - "1.0392772583104198e-15|+,+-,+-,->\n", - "-3.295408586849092e-14|-,0,0,+>\n", - "-1.6783367298603323e-14|-,0,0,->\n", - "9.346031318567684e-16|-,0,+-,->\n", - "1.3516350206252503e-15|-,+-,0,->\n", - "-3.8712764522741077e-16|-,+-,+-,+>\n", - "-4.293497543158631e-18|-,+-,+-,->\n", - "Ending Cycle172\n", - "9.462279975887852e-17|+,0,0,+>\n", - "-6.382061252914816e-16|+,0,0,->\n", - "6.208964814490447e-16|+,0,+-,+>\n", - "6.77756190291836e-16|+,+-,0,+>\n", - "-2.4253557323953e-16|+,+-,+-,+>\n", - "1.2220572112131362e-15|+,+-,+-,->\n", - "-3.333552984502975e-14|-,0,0,+>\n", - "-1.6435100399890616e-14|-,0,0,->\n", - "1.031198411418819e-15|-,0,+-,->\n", - "1.4514932113048448e-15|-,+-,0,->\n", - "-4.101684430831878e-16|-,+-,+-,+>\n", - "-6.6497471907874685e-18|-,+-,+-,->\n", - "Ending 
Cycle173\n", - "8.449292226120167e-17|+,0,0,+>\n", - "-7.112600080143475e-16|+,0,0,->\n", - "6.287640335622943e-16|+,0,+-,+>\n", - "7.033894555507818e-16|+,+-,0,+>\n", - "-2.4683625694523497e-16|+,+-,+-,+>\n", - "1.2498286908480977e-15|+,+-,+-,->\n", - "-3.378563246864023e-14|-,0,0,+>\n", - "-1.6036777221512126e-14|-,0,0,->\n", - "1.1301978213277094e-15|-,0,+-,->\n", - "1.5583088456821995e-15|-,+-,0,->\n", - "-4.070852058718782e-16|-,+-,+-,+>\n", - "-6.468265550896086e-18|-,+-,+-,->\n", - "Ending Cycle174\n", - "8.688595911098469e-17|+,0,0,+>\n", - "-7.713396731230607e-16|+,0,0,->\n", - "6.519857401651338e-16|+,0,+-,+>\n", - "7.409836921413653e-16|+,+-,0,+>\n", - "-2.4952929576479824e-16|+,+-,+-,+>\n", - "1.226791974071126e-15|+,+-,+-,->\n", - "-3.396434117492364e-14|-,0,0,+>\n", - "-1.556781058759759e-14|-,0,0,->\n", - "1.2297284089025865e-15|-,0,+-,->\n", - "1.683747768157437e-15|-,+-,0,->\n", - "-3.2665608755848327e-16|-,+-,+-,+>\n", - "-5.726582349425456e-18|-,+-,+-,->\n", - "Ending Cycle175\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 64%|██████▍ | 179/278 [00:11<00:06, 15.66it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "9.556580842606106e-17|+,0,0,+>\n", - "-8.091019723484742e-16|+,0,0,->\n", - "6.697160130487724e-16|+,0,+-,+>\n", - "7.673249751250097e-16|+,+-,0,+>\n", - "-2.630669075190506e-16|+,+-,+-,+>\n", - "1.3400679538760149e-15|+,+-,+-,->\n", - "-3.4257014669877405e-14|-,0,0,+>\n", - "-1.5037514248497626e-14|-,0,0,->\n", - "1.3431066033984959e-15|-,0,+-,->\n", - "1.7598072236465448e-15|-,+-,0,->\n", - "-3.2526429150234374e-16|-,+-,+-,+>\n", - "-7.974375784150272e-19|-,+-,+-,->\n", - "Ending Cycle176\n", - "8.883284771269037e-17|+,0,0,+>\n", - "-8.721462031369718e-16|+,0,0,->\n", - "7.160976438229745e-16|+,0,+-,+>\n", - "8.104760746046245e-16|+,+-,0,+>\n", - "-2.5453508895445494e-16|+,+-,+-,+>\n", - "1.4548006221701786e-15|+,+-,+-,->\n", - "-3.445554864929705e-14|-,0,0,+>\n", - "-1.4432725941977068e-14|-,0,0,->\n", - "1.4624635064351268e-15|-,0,+-,->\n", - "1.8512565078301815e-15|-,+-,0,->\n", - "-2.5081609906964093e-16|-,+-,+-,+>\n", - "-1.4839007913673993e-20|-,+-,+-,->\n", - "Ending Cycle177\n", - "8.62191689705486e-17|+,0,0,+>\n", - "-9.417458795501707e-16|+,0,0,->\n", - "7.830002036547278e-16|+,0,+-,+>\n", - "8.728628056647637e-16|+,+-,0,+>\n", - "-2.5009300419213794e-16|+,+-,+-,+>\n", - "1.4643729713305861e-15|+,+-,+-,->\n", - "-3.486595173637958e-14|-,0,0,+>\n", - "-1.3769085219874396e-14|-,0,0,->\n", - "1.6157841785171034e-15|-,0,+-,->\n", - "1.964422881851974e-15|-,+-,0,->\n", - "-3.318661037774371e-16|-,+-,+-,+>\n", - "-3.5040730779222394e-18|-,+-,+-,->\n", - "Ending Cycle178\n", - "9.070116616186434e-17|+,0,0,+>\n", - "-1.0302378504720174e-15|+,0,0,->\n", - "8.747387275420667e-16|+,0,+-,+>\n", - "9.446487342415666e-16|+,+-,0,+>\n", - "-2.4003327026771674e-16|+,+-,+-,+>\n", - "1.403198809756458e-15|+,+-,+-,->\n", - "-3.519761657833541e-14|-,0,0,+>\n", - "-1.3025666843129197e-14|-,0,0,->\n", - "1.7819333941876467e-15|-,0,+-,->\n", - "2.095631890970811e-15|-,+-,0,->\n", - "-3.835789442139192e-16|-,+-,+-,+>\n", - "-7.282524345283132e-18|-,+-,+-,->\n", - "Ending Cycle179\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 66%|██████▌ | 183/278 [00:11<00:06, 15.82it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "7.962805131329687e-17|+,0,0,+>\n", - 
"-1.1166737536343384e-15|+,0,0,->\n", - "9.0402324618174e-16|+,0,+-,+>\n", - "9.906075387630994e-16|+,+-,0,+>\n", - "-2.522567093632798e-16|+,+-,+-,+>\n", - "1.304214628826558e-15|+,+-,+-,->\n", - "-3.554994860791436e-14|-,0,0,+>\n", - "-1.2195274714533564e-14|-,0,0,->\n", - "1.9552295062888186e-15|-,0,+-,->\n", - "2.246805646203926e-15|-,+-,0,->\n", - "-3.6868892266288773e-16|-,+-,+-,+>\n", - "-9.062526105853506e-18|-,+-,+-,->\n", - "Ending Cycle180\n", - "7.850422058811811e-17|+,0,0,+>\n", - "-1.2161995914874379e-15|+,0,0,->\n", - "9.578197703969086e-16|+,0,+-,+>\n", - "1.0717405505306117e-15|+,+-,0,+>\n", - "-2.5431111747175166e-16|+,+-,+-,+>\n", - "1.3137390513248638e-15|+,+-,+-,->\n", - "-3.591690554125888e-14|-,0,0,+>\n", - "-1.1273409342305312e-14|-,0,0,->\n", - "2.162421618732832e-15|-,0,+-,->\n", - "2.420307961837154e-15|-,+-,0,->\n", - "-3.417837344794283e-16|-,+-,+-,+>\n", - "-9.925793325735516e-18|-,+-,+-,->\n", - "Ending Cycle181\n", - "8.270756409419931e-17|+,0,0,+>\n", - "-1.338585722219983e-15|+,0,0,->\n", - "1.0696748326991635e-15|+,0,+-,+>\n", - "1.1545942420254368e-15|+,+-,0,+>\n", - "-2.4204408452072735e-16|+,+-,+-,+>\n", - "1.2306502355110477e-15|+,+-,+-,->\n", - "-3.616017361504534e-14|-,0,0,+>\n", - "-1.0241149944893538e-14|-,0,0,->\n", - "2.373154551674586e-15|-,0,+-,->\n", - "2.6224284809488243e-15|-,+-,0,->\n", - "-3.1716910502970406e-16|-,+-,+-,+>\n", - "-8.15384155574374e-18|-,+-,+-,->\n", - "Ending Cycle182\n", - "8.831574375368547e-17|+,0,0,+>\n", - "-1.3982688030279086e-15|+,0,0,->\n", - "1.1491478215653814e-15|+,0,+-,+>\n", - "1.1942869365586725e-15|+,+-,0,+>\n", - "-2.32261326817019e-16|+,+-,+-,+>\n", - "1.2201633249999387e-15|+,+-,+-,->\n", - "-3.629756029537017e-14|-,0,0,+>\n", - "-9.087699388518516e-15|-,0,0,->\n", - "2.596603091812582e-15|-,0,+-,->\n", - "2.850514075456397e-15|-,+-,0,->\n", - "-3.212868870254012e-16|-,+-,+-,+>\n", - "-4.115920707110883e-18|-,+-,+-,->\n", - "Ending Cycle183\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 67%|██████▋ | 187/278 [00:11<00:05, 16.32it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "7.928086171522129e-17|+,0,0,+>\n", - "-1.4728647150318573e-15|+,0,0,->\n", - "1.301708034572231e-15|+,0,+-,+>\n", - "1.3305612458902684e-15|+,+-,0,+>\n", - "-2.4086873046656526e-16|+,+-,+-,+>\n", - "1.3548091235903215e-15|+,+-,+-,->\n", - "-3.656976793851706e-14|-,0,0,+>\n", - "-7.879785905969941e-15|-,0,0,->\n", - "2.8375817336465004e-15|-,0,+-,->\n", - "3.1059680877386803e-15|-,+-,0,->\n", - "-3.225546528210798e-16|-,+-,+-,+>\n", - "-5.9679992664715175e-18|-,+-,+-,->\n", - "Ending Cycle184\n", - "9.313932921651513e-17|+,0,0,+>\n", - "-1.55201171417381e-15|+,0,0,->\n", - "1.4765409391502733e-15|+,0,+-,+>\n", - "1.4585570105794628e-15|+,+-,0,+>\n", - "-2.3396841063653455e-16|+,+-,+-,+>\n", - "1.4679168019531009e-15|+,+-,+-,->\n", - "-3.663908531457282e-14|-,0,0,+>\n", - "-6.607071391245319e-15|-,0,0,->\n", - "3.090299667454224e-15|-,0,+-,->\n", - "3.394640813521846e-15|-,+-,0,->\n", - "-2.7501344439162857e-16|-,+-,+-,+>\n", - "-8.822732080074121e-18|-,+-,+-,->\n", - "Ending Cycle185\n", - "8.65416366475369e-17|+,0,0,+>\n", - "-1.652785636051376e-15|+,0,0,->\n", - "1.6190271388401397e-15|+,0,+-,+>\n", - "1.5703150869936057e-15|+,+-,0,+>\n", - "-2.573662071250633e-16|+,+-,+-,+>\n", - "1.4317887476867874e-15|+,+-,+-,->\n", - "-3.696677672741524e-14|-,0,0,+>\n", - "-5.271067886645088e-15|-,0,0,->\n", - "3.3922606677160547e-15|-,0,+-,->\n", 
- "3.685029044862563e-15|-,+-,0,->\n", - "-2.8171340581095993e-16|-,+-,+-,+>\n", - "-7.820985292218808e-18|-,+-,+-,->\n", - "Ending Cycle186\n", - "8.436316817855372e-17|+,0,0,+>\n", - "-1.746364277103193e-15|+,0,0,->\n", - "1.7253529679890588e-15|+,0,+-,+>\n", - "1.7039700347541787e-15|+,+-,0,+>\n", - "-2.4639593801205273e-16|+,+-,+-,+>\n", - "1.4073230566417828e-15|+,+-,+-,->\n", - "-3.724964364095609e-14|-,0,0,+>\n", - "-4.330683027884897e-15|-,0,0,->\n", - "3.427156745209815e-15|-,0,+-,->\n", - "3.737275016604299e-15|-,+-,0,->\n", - "-2.65388283817107e-16|-,+-,+-,+>\n", - "-7.21517711116962e-18|-,+-,+-,->\n", - "Ending Cycle187\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 69%|██████▊ | 191/278 [00:11<00:05, 16.72it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "8.492152936449106e-17|+,0,0,+>\n", - "-1.8551114579768625e-15|+,0,0,->\n", - "1.9193795113538412e-15|+,0,+-,+>\n", - "1.8838546301500386e-15|+,+-,0,+>\n", - "-2.3865136937700685e-16|+,+-,+-,+>\n", - "1.0589659314972188e-15|+,+-,+-,->\n", - "-3.7445976506877405e-14|-,0,0,+>\n", - "-3.790243902989245e-15|-,0,0,->\n", - "3.553953804843001e-15|-,0,+-,->\n", - "3.857214431313281e-15|-,+-,0,->\n", - "-2.6742588742192204e-16|-,+-,+-,+>\n", - "-1.25057067513904e-17|-,+-,+-,->\n", - "Ending Cycle188\n", - "9.551143129948411e-17|+,0,0,+>\n", - "-1.9323745103322598e-15|+,0,0,->\n", - "2.0785281670738516e-15|+,0,+-,+>\n", - "2.0153620623954985e-15|+,+-,0,+>\n", - "-2.3283374556262166e-16|+,+-,+-,+>\n", - "1.1001493247255593e-15|+,+-,+-,->\n", - "-3.764023914595207e-14|-,0,0,+>\n", - "-3.964304514786145e-15|-,0,0,->\n", - "3.7120273556588055e-15|-,0,+-,->\n", - "3.95552679552719e-15|-,+-,0,->\n", - "-1.955211187638403e-16|-,+-,+-,+>\n", - "-1.060497122371089e-17|-,+-,+-,->\n", - "Ending Cycle189\n", - "8.3907688431094e-17|+,0,0,+>\n", - "-2.017086511432012e-15|+,0,0,->\n", - "2.234481281391304e-15|+,0,+-,+>\n", - "2.185934571250514e-15|+,+-,0,+>\n", - "-2.314413151713229e-16|+,+-,+-,+>\n", - "1.4111753972065588e-15|+,+-,+-,->\n", - "-3.797120863435855e-14|-,0,0,+>\n", - "-4.789257650752763e-15|-,0,0,->\n", - "3.867851301393123e-15|-,0,+-,->\n", - "4.0955946080942934e-15|-,+-,0,->\n", - "-2.675975749356444e-16|-,+-,+-,+>\n", - "-1.0931420765536906e-17|-,+-,+-,->\n", - "Ending Cycle190\n", - "9.282486204144282e-17|+,0,0,+>\n", - "-2.117380404330118e-15|+,0,0,->\n", - "2.4018603101935457e-15|+,0,+-,+>\n", - "2.3365153187711496e-15|+,+-,0,+>\n", - "-2.377792912860901e-16|+,+-,+-,+>\n", - "1.366507175075774e-15|+,+-,+-,->\n", - "-3.82402168405215e-14|-,0,0,+>\n", - "-4.917231153317978e-15|-,0,0,->\n", - "4.021126690720243e-15|-,0,+-,->\n", - "4.2309149575781835e-15|-,+-,0,->\n", - "-1.887272854056776e-16|-,+-,+-,+>\n", - "-1.816414164528018e-17|-,+-,+-,->\n", - "Ending Cycle191\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 70%|███████ | 195/278 [00:12<00:04, 16.94it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "9.153218381834454e-17|+,0,0,+>\n", - "-2.238052116428111e-15|+,0,0,->\n", - "2.5654641531754406e-15|+,0,+-,+>\n", - "2.5135543040540568e-15|+,+-,0,+>\n", - "-2.3814890008154385e-16|+,+-,+-,+>\n", - "1.1880690494744502e-15|+,+-,+-,->\n", - "-3.8419368695497376e-14|-,0,0,+>\n", - "-5.042264503173251e-15|-,0,0,->\n", - "4.206428139132649e-15|-,0,+-,->\n", - "4.386563237139718e-15|-,+-,0,->\n", - "-1.7875700548537654e-16|-,+-,+-,+>\n", - 
"-1.3623078148846449e-17|-,+-,+-,->\n", - "Ending Cycle192\n", - "9.017707444893445e-17|+,0,0,+>\n", - "-2.3592382758626963e-15|+,0,0,->\n", - "2.7659178807528776e-15|+,0,+-,+>\n", - "2.7110671120689595e-15|+,+-,0,+>\n", - "-2.454076950731801e-16|+,+-,+-,+>\n", - "1.2326276390192095e-15|+,+-,+-,->\n", - "-3.870578550123294e-14|-,0,0,+>\n", - "-5.166583668733693e-15|-,0,0,->\n", - "4.388372643408852e-15|-,0,+-,->\n", - "4.548199885104013e-15|-,+-,0,->\n", - "-2.1747015831859523e-16|-,+-,+-,+>\n", - "-1.456905584143533e-17|-,+-,+-,->\n", - "Ending Cycle193\n", - "9.16893710813979e-17|+,0,0,+>\n", - "-2.476787344915023e-15|+,0,0,->\n", - "2.987642068346365e-15|+,0,+-,+>\n", - "2.956452864942016e-15|+,+-,0,+>\n", - "-2.280387908196128e-16|+,+-,+-,+>\n", - "1.3439094920201494e-15|+,+-,+-,->\n", - "-3.9154306795192275e-14|-,0,0,+>\n", - "-5.288288580298625e-15|-,0,0,->\n", - "4.5717754914955276e-15|-,0,+-,->\n", - "4.7145464834384505e-15|-,+-,0,->\n", - "-3.0334532364897847e-16|-,+-,+-,+>\n", - "-1.4958034454563642e-17|-,+-,+-,->\n", - "Ending Cycle194\n", - "9.477717971400862e-17|+,0,0,+>\n", - "-2.5945741422313687e-15|+,0,0,->\n", - "3.2261358999494355e-15|+,0,+-,+>\n", - "3.195646273352886e-15|+,+-,0,+>\n", - "-2.271805381583424e-16|+,+-,+-,+>\n", - "1.371334825512803e-15|+,+-,+-,->\n", - "-3.92336664979193e-14|-,0,0,+>\n", - "-5.4077157144806e-15|-,0,0,->\n", - "4.7811328285008155e-15|-,0,+-,->\n", - "4.8916026060751626e-15|-,+-,0,->\n", - "-3.3832499411705686e-16|-,+-,+-,+>\n", - "-1.9708353790965015e-17|-,+-,+-,->\n", - "Ending Cycle195\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=100, tau=0.01, energy~-1.729014: 72%|███████▏ | 199/278 [00:12<00:04, 16.99it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "8.686296170217064e-17|+,0,0,+>\n", - "-2.719111838835166e-15|+,0,0,->\n", - "3.455774821766961e-15|+,0,+-,+>\n", - "3.428051488241131e-15|+,+-,0,+>\n", - "-2.264516080290265e-16|+,+-,+-,+>\n", - "1.3876682443277892e-15|+,+-,+-,->\n", - "-3.955404066886312e-14|-,0,0,+>\n", - "-5.540742784255827e-15|-,0,0,->\n", - "4.993840946395442e-15|-,0,+-,->\n", - "5.14151921138301e-15|-,+-,0,->\n", - "-3.595726335090447e-16|-,+-,+-,+>\n", - "-1.4853094930423742e-17|-,+-,+-,->\n", - "Ending Cycle196\n", - "8.760872311197675e-17|+,0,0,+>\n", - "-2.844245054938519e-15|+,0,0,->\n", - "3.743992137715132e-15|+,0,+-,+>\n", - "3.732225814994604e-15|+,+-,0,+>\n", - "-2.3865914748671575e-16|+,+-,+-,+>\n", - "1.3897308646030579e-15|+,+-,+-,->\n", - "-3.973708750234414e-14|-,0,0,+>\n", - "-5.66365569889688e-15|-,0,0,->\n", - "5.2223512808270196e-15|-,0,+-,->\n", - "5.385739946782338e-15|-,+-,0,->\n", - "-2.8085544058555244e-16|-,+-,+-,+>\n", - "-1.8727237440796448e-17|-,+-,+-,->\n", - "Ending Cycle197\n", - "1.0355296196977896e-16|+,0,0,+>\n", - "-2.9697381764793914e-15|+,0,0,->\n", - "4.006943086448581e-15|+,0,+-,+>\n", - "4.002674563252459e-15|+,+-,0,+>\n", - "-2.3740928723750123e-16|+,+-,+-,+>\n", - "1.4457672386491123e-15|+,+-,+-,->\n", - "-3.994785014117277e-14|-,0,0,+>\n", - "-5.7697138831918e-15|-,0,0,->\n", - "5.4660238021238176e-15|-,0,+-,->\n", - "5.5799107949482786e-15|-,+-,0,->\n", - "-2.820263806745989e-16|-,+-,+-,+>\n", - "-1.7820755772726366e-17|-,+-,+-,->\n", - "Ending Cycle198\n", - "9.784295771288893e-17|+,0,0,+>\n", - "-3.1059813676709814e-15|+,0,0,->\n", - "4.00341618427838e-15|+,0,+-,+>\n", - "4.00914852373456e-15|+,+-,0,+>\n", - "-2.2353958213752256e-16|+,+-,+-,+>\n", - "1.4790032040883128e-15|+,+-,+-,->\n", - 
"-4.010930625266575e-14|-,0,0,+>\n", - "-5.890166431795294e-15|-,0,0,->\n", - "5.733038427350018e-15|-,0,+-,->\n", - "5.846100211645076e-15|-,+-,0,->\n", - "-2.8244240718516854e-16|-,+-,+-,+>\n", - "-1.0620649910945647e-17|-,+-,+-,->\n", - "Ending Cycle199\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 72%|███████▏ | 201/278 [00:12<00:05, 13.20it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.0238499039596505e-16|+,0,0,+>\n", - "-3.2642623437232258e-15|+,0,0,->\n", - "4.021428260594285e-15|+,0,+-,+>\n", - "4.00021392777223e-15|+,+-,0,+>\n", - "-2.2148410804490096e-16|+,+-,+-,+>\n", - "1.434414534154341e-15|+,+-,+-,->\n", - "-4.0241882425001687e-14|-,0,0,+>\n", - "-6.0018081282797964e-15|-,0,0,->\n", - "5.709955243890305e-15|-,0,+-,->\n", - "5.813184720095845e-15|-,+-,0,->\n", - "-1.764680381385507e-16|-,+-,+-,+>\n", - "-1.0544917210820146e-17|-,+-,+-,->\n", - "Ending Cycle200\n", - "1.0796027148447792e-16|+,0,0,+>\n", - "-3.4069412074106685e-15|+,0,0,->\n", - "4.040155577982022e-15|+,0,+-,+>\n", - "3.999512576908397e-15|+,+-,0,+>\n", - "-2.248333990777086e-16|+,+-,+-,+>\n", - "1.4213047854578282e-15|+,+-,+-,->\n", - "-4.057751680320489e-14|-,0,0,+>\n", - "-6.119519117450425e-15|-,0,0,->\n", - "5.6795338008400725e-15|-,0,+-,->\n", - "5.804118323737288e-15|-,+-,0,->\n", - "-1.8490406836181692e-16|-,+-,+-,+>\n", - "-6.409459593599066e-18|-,+-,+-,->\n", - "Ending Cycle201\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 74%|███████▎ | 205/278 [00:12<00:05, 14.34it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.1449573683766643e-16|+,0,0,+>\n", - "-3.5528773868490686e-15|+,0,0,->\n", - "4.054177901322724e-15|+,0,+-,+>\n", - "4.004799215076143e-15|+,+-,0,+>\n", - "-2.238014268973071e-16|+,+-,+-,+>\n", - "1.4200530942699034e-15|+,+-,+-,->\n", - "-4.089541576785639e-14|-,0,0,+>\n", - "-6.235546871724701e-15|-,0,0,->\n", - "5.6528941442972664e-15|-,0,+-,->\n", - "5.7958699036028264e-15|-,+-,0,->\n", - "-1.3288639987734602e-16|-,+-,+-,+>\n", - "-9.088895669042681e-18|-,+-,+-,->\n", - "Ending Cycle202\n", - "1.0678496411726322e-16|+,0,0,+>\n", - "-3.69610061766293e-15|+,0,0,->\n", - "4.0426415602428905e-15|+,0,+-,+>\n", - "3.9947948139450385e-15|+,+-,0,+>\n", - "-2.108849017886627e-16|+,+-,+-,+>\n", - "1.4042846173929267e-15|+,+-,+-,->\n", - "-4.112770531930172e-14|-,0,0,+>\n", - "-6.351356329551691e-15|-,0,0,->\n", - "5.636503141066992e-15|-,0,+-,->\n", - "5.7871624746051936e-15|-,+-,0,->\n", - "-1.611106115535763e-16|-,+-,+-,+>\n", - "-1.0113856054448765e-17|-,+-,+-,->\n", - "Ending Cycle203\n", - "1.1672658339767813e-16|+,0,0,+>\n", - "-3.855517564429362e-15|+,0,0,->\n", - "4.020981187571178e-15|+,0,+-,+>\n", - "3.9816442352550425e-15|+,+-,0,+>\n", - "-1.9964484271580828e-16|+,+-,+-,+>\n", - "1.4516826648539606e-15|+,+-,+-,->\n", - "-4.133939977038592e-14|-,0,0,+>\n", - "-6.463501165557485e-15|-,0,0,->\n", - "5.615114142096793e-15|-,0,+-,->\n", - "5.77437720931204e-15|-,+-,0,->\n", - "-1.040714366799827e-16|-,+-,+-,+>\n", - "-1.3711880288217603e-17|-,+-,+-,->\n", - "Ending Cycle204\n", - "1.1235201728764818e-16|+,0,0,+>\n", - "-4.005165889590802e-15|+,0,0,->\n", - "4.008428946146997e-15|+,0,+-,+>\n", - "3.9951612410743645e-15|+,+-,0,+>\n", - "-2.0324114332779986e-16|+,+-,+-,+>\n", - "1.4700711527009452e-15|+,+-,+-,->\n", - "-4.1585146922641345e-14|-,0,0,+>\n", - "-6.57817821681772e-15|-,0,0,->\n", - 
"5.596752450727601e-15|-,0,+-,->\n", - "5.7591280728612156e-15|-,+-,0,->\n", - "-1.1496629920516429e-16|-,+-,+-,+>\n", - "-1.509540738645821e-17|-,+-,+-,->\n", - "Ending Cycle205\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\r", - "n=200, tau=0.01, energy~-1.859368: 74%|███████▍ | 207/278 [00:13<00:05, 13.19it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.4103529747292686e-16|+,0,0,+>\n", - "-4.1356446940844924e-15|+,0,0,->\n", - "4.2810910954831585e-15|+,0,+-,+>\n", - "4.2594043902239645e-15|+,+-,0,+>\n", - "-2.0323877909284723e-16|+,+-,+-,+>\n", - "1.5561584390322794e-15|+,+-,+-,->\n", - "-4.175071635142476e-14|-,0,0,+>\n", - "-6.691900935908366e-15|-,0,0,->\n", - "5.575954984772422e-15|-,0,+-,->\n", - "5.750655752368772e-15|-,+-,0,->\n", - "-7.553729644155245e-17|-,+-,+-,+>\n", - "-2.2942196130557574e-17|-,+-,+-,->\n", - "Ending Cycle206\n", - "1.3438408935615892e-16|+,0,0,+>\n", - "-4.28358352707079e-15|+,0,0,->\n", - "4.252394165684138e-15|+,0,+-,+>\n", - "4.256509226726631e-15|+,+-,0,+>\n", - "-2.1019462710707484e-16|+,+-,+-,+>\n", - "1.6505276712656035e-15|+,+-,+-,->\n", - "-4.191556789906864e-14|-,0,0,+>\n", - "-6.8054086440733785e-15|-,0,0,->\n", - "5.848630949968608e-15|-,0,+-,->\n", - "6.043153127213564e-15|-,+-,0,->\n", - "-7.079162305403965e-17|-,+-,+-,+>\n", - "-2.2273797218164465e-17|-,+-,+-,->\n", - "Ending Cycle207\n", - "1.4644647363210778e-16|+,0,0,+>\n", - "-4.461925888942796e-15|+,0,0,->\n", - "4.2831260014027814e-15|+,0,+-,+>\n", - "4.273343919417487e-15|+,+-,0,+>\n", - "-2.1581758444189702e-16|+,+-,+-,+>\n", - "1.6386733435566278e-15|+,+-,+-,->\n", - "-4.2099054807177e-14|-,0,0,+>\n", - "-6.9073786390707835e-15|-,0,0,->\n", - "5.839592279157507e-15|-,0,+-,->\n", - "6.008489081893645e-15|-,+-,0,->\n", - "-1.3342763785572094e-16|-,+-,+-,+>\n", - "-2.2748257242090595e-17|-,+-,+-,->\n", - "Ending Cycle208\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 76%|███████▌ | 211/278 [00:13<00:04, 13.50it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.4489061063472452e-16|+,0,0,+>\n", - "-4.619754397140642e-15|+,0,0,->\n", - "4.293529481169682e-15|+,0,+-,+>\n", - "4.304988881836271e-15|+,+-,0,+>\n", - "-2.237202865063612e-16|+,+-,+-,+>\n", - "1.6381026496789522e-15|+,+-,+-,->\n", - "-4.2310851214295235e-14|-,0,0,+>\n", - "-7.017062478772894e-15|-,0,0,->\n", - "5.822898271546874e-15|-,0,+-,->\n", - "6.0047463765814155e-15|-,+-,0,->\n", - "-2.061788177728206e-16|-,+-,+-,+>\n", - "-2.1858613344185963e-17|-,+-,+-,->\n", - "Ending Cycle209\n", - "9.429611962053849e-17|+,0,0,+>\n", - "-4.800414091343091e-15|+,0,0,->\n", - "4.307330893136076e-15|+,0,+-,+>\n", - "4.2961397815498566e-15|+,+-,0,+>\n", - "-2.1959689236487587e-16|+,+-,+-,+>\n", - "1.7244425188415706e-15|+,+-,+-,->\n", - "-4.24797932660886e-14|-,0,0,+>\n", - "-7.10835111870363e-15|-,0,0,->\n", - "5.8097878994648214e-15|-,0,+-,->\n", - "5.9263564285059016e-15|-,+-,0,->\n", - "-1.938622733364845e-16|-,+-,+-,+>\n", - "-2.9475635253339885e-17|-,+-,+-,->\n", - "Ending Cycle210\n", - "2.273150081552556e-16|+,0,0,+>\n", - "-4.995259551264616e-15|+,0,0,->\n", - "4.2708122965772975e-15|+,0,+-,+>\n", - "4.101595145082163e-15|+,+-,0,+>\n", - "-2.19878639197091e-16|+,+-,+-,+>\n", - "1.6862854654613496e-15|+,+-,+-,->\n", - "-4.268380764094374e-14|-,0,0,+>\n", - "-7.195757256624944e-15|-,0,0,->\n", - "5.791498588710135e-15|-,0,+-,->\n", - "5.847952024902163e-15|-,+-,0,->\n", 
- "-2.2126626766211057e-16|-,+-,+-,+>\n", - "-4.356194947075805e-17|-,+-,+-,->\n", - "Ending Cycle211\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 77%|███████▋ | 215/278 [00:13<00:04, 13.16it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.3669244871159693e-16|+,0,0,+>\n", - "-5.156643920438444e-15|+,0,0,->\n", - "4.2749862371961836e-15|+,0,+-,+>\n", - "4.104053193568589e-15|+,+-,0,+>\n", - "-2.1560033056364923e-16|+,+-,+-,+>\n", - "1.706911932996056e-15|+,+-,+-,->\n", - "-4.2772142587119406e-14|-,0,0,+>\n", - "-7.263373499104986e-15|-,0,0,->\n", - "5.773514525096582e-15|-,0,+-,->\n", - "5.830887281216162e-15|-,+-,0,->\n", - "-2.237292163961724e-16|-,+-,+-,+>\n", - "-4.900731214253437e-17|-,+-,+-,->\n", - "Ending Cycle212\n", - "2.436694630415333e-16|+,0,0,+>\n", - "-5.318346630686516e-15|+,0,0,->\n", - "4.270346837980336e-15|+,0,+-,+>\n", - "4.11095136751768e-15|+,+-,0,+>\n", - "-1.990023339024793e-16|+,+-,+-,+>\n", - "1.735093721783031e-15|+,+-,+-,->\n", - "-4.3001761918420004e-14|-,0,0,+>\n", - "-7.334137372333277e-15|-,0,0,->\n", - "5.750861637873542e-15|-,0,+-,->\n", - "5.8276041517088734e-15|-,+-,0,->\n", - "-2.8010561781640785e-16|-,+-,+-,+>\n", - "-5.4415167650574866e-17|-,+-,+-,->\n", - "Ending Cycle213\n", - "2.501599450650398e-16|+,0,0,+>\n", - "-5.5066570878271804e-15|+,0,0,->\n", - "4.225737513304913e-15|+,0,+-,+>\n", - "4.134171625531965e-15|+,+-,0,+>\n", - "-1.9991804157393526e-16|+,+-,+-,+>\n", - "1.6894704432205697e-15|+,+-,+-,->\n", - "-4.307890513266351e-14|-,0,0,+>\n", - "-7.437755040486326e-15|-,0,0,->\n", - "5.7334523516020776e-15|-,0,+-,->\n", - "5.821970766462661e-15|-,+-,0,->\n", - "-2.1983604635524054e-16|-,+-,+-,+>\n", - "-7.035607326675772e-17|-,+-,+-,->\n", - "Ending Cycle214\n", - "2.1363990260280428e-16|+,0,0,+>\n", - "-5.668651372727403e-15|+,0,0,->\n", - "4.218997854346402e-15|+,0,+-,+>\n", - "4.0949509206955985e-15|+,+-,0,+>\n", - "-1.955491303395153e-16|+,+-,+-,+>\n", - "1.2858149188161468e-15|+,+-,+-,->\n", - "-4.3273013579157886e-14|-,0,0,+>\n", - "-7.536891807258057e-15|-,0,0,->\n", - "5.7106439272655416e-15|-,0,+-,->\n", - "5.8144087931845265e-15|-,+-,0,->\n", - "-2.43833426871221e-16|-,+-,+-,+>\n", - "-3.392952029269826e-17|-,+-,+-,->\n", - "Ending Cycle215\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 79%|███████▉ | 219/278 [00:13<00:03, 14.80it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.488491817399275e-16|+,0,0,+>\n", - "-5.847093192298064e-15|+,0,0,->\n", - "4.238034289446464e-15|+,0,+-,+>\n", - "4.080583716433305e-15|+,+-,0,+>\n", - "-1.9446480788172896e-16|+,+-,+-,+>\n", - "1.1862975730289204e-15|+,+-,+-,->\n", - "-4.357708468206517e-14|-,0,0,+>\n", - "-7.635284270411063e-15|-,0,0,->\n", - "5.68781549529669e-15|-,0,+-,->\n", - "5.794545985597802e-15|-,+-,0,->\n", - "-2.6147595473466594e-16|-,+-,+-,+>\n", - "-1.0641590313042561e-17|-,+-,+-,->\n", - "Ending Cycle216\n", - "2.8145390624900205e-16|+,0,0,+>\n", - "-6.031089077021154e-15|+,0,0,->\n", - "4.2155452181142806e-15|+,0,+-,+>\n", - "4.052055682000526e-15|+,+-,0,+>\n", - "-1.8315520154675897e-16|+,+-,+-,+>\n", - "1.2625584643325399e-15|+,+-,+-,->\n", - "-4.3685486563979885e-14|-,0,0,+>\n", - "-7.691285617690238e-15|-,0,0,->\n", - "5.669208425609134e-15|-,0,+-,->\n", - "5.782155985064921e-15|-,+-,0,->\n", - "-2.2564734218645956e-16|-,+-,+-,+>\n", - "2.2657375479185917e-17|-,+-,+-,->\n", - 
"Ending Cycle217\n", - "3.0491739017345184e-16|+,0,0,+>\n", - "-6.208840000653907e-15|+,0,0,->\n", - "4.203034919012076e-15|+,0,+-,+>\n", - "4.0587486723857475e-15|+,+-,0,+>\n", - "-1.8129518562101442e-16|+,+-,+-,+>\n", - "1.046266723455002e-15|+,+-,+-,->\n", - "-4.395164118208917e-14|-,0,0,+>\n", - "-7.788376582157455e-15|-,0,0,->\n", - "5.653997449388929e-15|-,0,+-,->\n", - "5.772967494211584e-15|-,+-,0,->\n", - "-2.8292390326253736e-16|-,+-,+-,+>\n", - "5.503630160624402e-17|-,+-,+-,->\n", - "Ending Cycle218\n", - "2.670579786117768e-16|+,0,0,+>\n", - "-6.411951513885918e-15|+,0,0,->\n", - "4.1404728641259385e-15|+,0,+-,+>\n", - "4.058196300583586e-15|+,+-,0,+>\n", - "-1.6757942004880208e-16|+,+-,+-,+>\n", - "1.1226036478446857e-15|+,+-,+-,->\n", - "-4.418419760356424e-14|-,0,0,+>\n", - "-7.838655988525389e-15|-,0,0,->\n", - "5.638931254390375e-15|-,0,+-,->\n", - "5.763712464893674e-15|-,+-,0,->\n", - "-3.150811203456495e-16|-,+-,+-,+>\n", - "8.135357486568787e-17|-,+-,+-,->\n", - "Ending Cycle219\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 80%|████████ | 223/278 [00:14<00:03, 15.61it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.4867643875580476e-16|+,0,0,+>\n", - "-6.589529683220316e-15|+,0,0,->\n", - "4.123518827243663e-15|+,0,+-,+>\n", - "4.063733249921477e-15|+,+-,0,+>\n", - "-1.6861675659506946e-16|+,+-,+-,+>\n", - "9.515682466517565e-16|+,+-,+-,->\n", - "-4.443523276591974e-14|-,0,0,+>\n", - "-7.937623132851053e-15|-,0,0,->\n", - "5.616445663824534e-15|-,0,+-,->\n", - "5.7589860066586834e-15|-,+-,0,->\n", - "-2.824382526582869e-16|-,+-,+-,+>\n", - "4.8525360271671697e-17|-,+-,+-,->\n", - "Ending Cycle220\n", - "2.108347146814661e-16|+,0,0,+>\n", - "-6.781766015764117e-15|+,0,0,->\n", - "4.4692179709991066e-15|+,0,+-,+>\n", - "4.390121944990541e-15|+,+-,0,+>\n", - "-1.7095493751121503e-16|+,+-,+-,+>\n", - "1.0398010374716596e-15|+,+-,+-,->\n", - "-4.449943474847876e-14|-,0,0,+>\n", - "-8.084044141488776e-15|-,0,0,->\n", - "5.5967449329443304e-15|-,0,+-,->\n", - "5.747772376116536e-15|-,+-,0,->\n", - "-3.1775510929837923e-16|-,+-,+-,+>\n", - "2.3953088789235484e-17|-,+-,+-,->\n", - "Ending Cycle221\n", - "1.7135906212814786e-16|+,0,0,+>\n", - "-6.9924373303229646e-15|+,0,0,->\n", - "4.4147571048429596e-15|+,0,+-,+>\n", - "4.384221856153478e-15|+,+-,0,+>\n", - "-1.7888808441891195e-16|+,+-,+-,+>\n", - "1.1401874044956437e-15|+,+-,+-,->\n", - "-4.453653249348367e-14|-,0,0,+>\n", - "-8.200329757836762e-15|-,0,0,->\n", - "5.904100811146046e-15|-,0,+-,->\n", - "6.05898945942127e-15|-,+-,0,->\n", - "-2.7941067935638995e-16|-,+-,+-,+>\n", - "2.879831710203294e-17|-,+-,+-,->\n", - "Ending Cycle222\n", - "8.323600951680316e-17|+,0,0,+>\n", - "-7.184803789747579e-15|+,0,0,->\n", - "4.418283526563233e-15|+,0,+-,+>\n", - "4.401690437343434e-15|+,+-,0,+>\n", - "-1.793329192425998e-16|+,+-,+-,+>\n", - "1.2303651969760768e-15|+,+-,+-,->\n", - "-4.473915301494257e-14|-,0,0,+>\n", - "-8.32111598454462e-15|-,0,0,->\n", - "5.87868137731851e-15|-,0,+-,->\n", - "6.084688177667973e-15|-,+-,0,->\n", - "-2.3113289053472783e-16|-,+-,+-,+>\n", - "2.998259651578986e-17|-,+-,+-,->\n", - "Ending Cycle223\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 82%|████████▏ | 227/278 [00:14<00:03, 16.16it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "8.956509652067452e-17|+,0,0,+>\n", - 
"-7.373732566600643e-15|+,0,0,->\n", - "4.4449788026714894e-15|+,0,+-,+>\n", - "4.416084236718624e-15|+,+-,0,+>\n", - "-1.6203501924606155e-16|+,+-,+-,+>\n", - "1.326108542908983e-15|+,+-,+-,->\n", - "-4.4888297411959985e-14|-,0,0,+>\n", - "-8.426870630650429e-15|-,0,0,->\n", - "5.8411317165456136e-15|-,0,+-,->\n", - "6.06939036594805e-15|-,+-,0,->\n", - "-2.217580771489466e-16|-,+-,+-,+>\n", - "6.806767423341196e-17|-,+-,+-,->\n", - "Ending Cycle224\n", - "1.1508009011645831e-17|+,0,0,+>\n", - "-7.57726595567035e-15|+,0,0,->\n", - "4.442989396501963e-15|+,0,+-,+>\n", - "4.405047150788724e-15|+,+-,0,+>\n", - "-1.5745911443458302e-16|+,+-,+-,+>\n", - "1.398832454202367e-15|+,+-,+-,->\n", - "-4.503496778337642e-14|-,0,0,+>\n", - "-8.537852553582048e-15|-,0,0,->\n", - "5.809941584690216e-15|-,0,+-,->\n", - "6.052869872432425e-15|-,+-,0,->\n", - "-1.9122260886478932e-16|-,+-,+-,+>\n", - "7.096313875404162e-17|-,+-,+-,->\n", - "Ending Cycle225\n", - "-5.652375864454201e-17|+,0,0,+>\n", - "-7.761170593009854e-15|+,0,0,->\n", - "4.430374113912211e-15|+,0,+-,+>\n", - "4.403708743845202e-15|+,+-,0,+>\n", - "-1.5142869075804782e-16|+,+-,+-,+>\n", - "1.4622349452404095e-15|+,+-,+-,->\n", - "-4.525757792696148e-14|-,0,0,+>\n", - "-8.648991620221297e-15|-,0,0,->\n", - "5.783045595337385e-15|-,0,+-,->\n", - "6.032416613014661e-15|-,+-,0,->\n", - "-2.828408330402423e-16|-,+-,+-,+>\n", - "1.0081028398373699e-16|-,+-,+-,->\n", - "Ending Cycle226\n", - "-7.641633363633725e-17|+,0,0,+>\n", - "-7.946356990813008e-15|+,0,0,->\n", - "4.423200751599095e-15|+,0,+-,+>\n", - "4.37404836892174e-15|+,+-,0,+>\n", - "-1.5363708163658982e-16|+,+-,+-,+>\n", - "1.54039376671244e-15|+,+-,+-,->\n", - "-4.5353631779393975e-14|-,0,0,+>\n", - "-8.748905048410892e-15|-,0,0,->\n", - "5.753315105885885e-15|-,0,+-,->\n", - "6.016629189353329e-15|-,+-,0,->\n", - "-2.5275940882457733e-16|-,+-,+-,+>\n", - "1.228507807433035e-16|-,+-,+-,->\n", - "Ending Cycle227\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 83%|████████▎ | 231/278 [00:14<00:02, 16.50it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-1.076896596362772e-16|+,0,0,+>\n", - "-8.169691747248043e-15|+,0,0,->\n", - "4.410839834159914e-15|+,0,+-,+>\n", - "4.331432846214183e-15|+,+-,0,+>\n", - "-1.6687110113714387e-16|+,+-,+-,+>\n", - "1.5896393579764277e-15|+,+-,+-,->\n", - "-4.5554988879780214e-14|-,0,0,+>\n", - "-9.046394383090658e-15|-,0,0,->\n", - "5.73302164102497e-15|-,0,+-,->\n", - "5.997164123218936e-15|-,+-,0,->\n", - "-2.3197193329961056e-16|-,+-,+-,+>\n", - "1.3657486050700737e-16|-,+-,+-,->\n", - "Ending Cycle228\n", - "-2.0369251880690986e-16|+,0,0,+>\n", - "-8.409912347836008e-15|+,0,0,->\n", - "4.405493098487508e-15|+,0,+-,+>\n", - "4.295843950727993e-15|+,+-,0,+>\n", - "-1.7397701871205743e-16|+,+-,+-,+>\n", - "1.6467777502553082e-15|+,+-,+-,->\n", - "-4.561282619200685e-14|-,0,0,+>\n", - "-9.116097120247305e-15|-,0,0,->\n", - "5.708045575683189e-15|-,0,+-,->\n", - "5.929756652544067e-15|-,+-,0,->\n", - "-2.9640716106276065e-16|-,+-,+-,+>\n", - "1.953316295822282e-16|-,+-,+-,->\n", - "Ending Cycle229\n", - "-2.8356875338215245e-16|+,0,0,+>\n", - "-8.617897534055227e-15|+,0,0,->\n", - "4.385634862320708e-15|+,0,+-,+>\n", - "4.296289071045752e-15|+,+-,0,+>\n", - "-1.7109037719712368e-16|+,+-,+-,+>\n", - "1.7013944666512577e-15|+,+-,+-,->\n", - "-4.5674099334181576e-14|-,0,0,+>\n", - "-8.751225549446145e-15|-,0,0,->\n", - "5.683614067462107e-15|-,0,+-,->\n", - 
"5.842568632315401e-15|-,+-,0,->\n", - "-3.586769196238994e-16|-,+-,+-,+>\n", - "2.3580851243028723e-16|-,+-,+-,->\n", - "Ending Cycle230\n", - "-4.021695715285558e-16|+,0,0,+>\n", - "-8.861255963075145e-15|+,0,0,->\n", - "4.368666606017201e-15|+,0,+-,+>\n", - "4.277717761300837e-15|+,+-,0,+>\n", - "-1.680101213345032e-16|+,+-,+-,+>\n", - "1.7817665759393815e-15|+,+-,+-,->\n", - "-4.5845408705890057e-14|-,0,0,+>\n", - "-8.83298688974286e-15|-,0,0,->\n", - "5.665774489168447e-15|-,0,+-,->\n", - "5.7872353419137984e-15|-,+-,0,->\n", - "-4.414992623410431e-16|-,+-,+-,+>\n", - "2.6365136787114156e-16|-,+-,+-,->\n", - "Ending Cycle231\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 85%|████████▍ | 235/278 [00:14<00:02, 16.25it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-5.031078421771847e-16|+,0,0,+>\n", - "-9.083875164199408e-15|+,0,0,->\n", - "4.3198519440747315e-15|+,0,+-,+>\n", - "4.270621330083475e-15|+,+-,0,+>\n", - "-1.7220215840971607e-16|+,+-,+-,+>\n", - "1.8452687084657767e-15|+,+-,+-,->\n", - "-4.587159685748682e-14|-,0,0,+>\n", - "-8.832950546099394e-15|-,0,0,->\n", - "5.993028416415186e-15|-,0,+-,->\n", - "6.126415771879565e-15|-,+-,0,->\n", - "-4.714059467806678e-16|-,+-,+-,+>\n", - "3.1092518216839407e-16|-,+-,+-,->\n", - "Ending Cycle232\n", - "-6.410490790828812e-16|+,0,0,+>\n", - "-9.32057140674931e-15|+,0,0,->\n", - "4.2786527929144866e-15|+,0,+-,+>\n", - "4.243641017098901e-15|+,+-,0,+>\n", - "-1.8636294618965497e-16|+,+-,+-,+>\n", - "1.901828311226741e-15|+,+-,+-,->\n", - "-4.59904902483362e-14|-,0,0,+>\n", - "-8.842276730700468e-15|-,0,0,->\n", - "5.850338176874842e-15|-,0,+-,->\n", - "5.94979616461943e-15|-,+-,0,->\n", - "-4.420233011977347e-16|-,+-,+-,+>\n", - "3.472643235703325e-16|-,+-,+-,->\n", - "Ending Cycle233\n", - "-7.538660534669582e-16|+,0,0,+>\n", - "-9.544788373159559e-15|+,0,0,->\n", - "4.2422550129736385e-15|+,0,+-,+>\n", - "4.229084655193446e-15|+,+-,0,+>\n", - "-1.8270327970427864e-16|+,+-,+-,+>\n", - "1.954593610642393e-15|+,+-,+-,->\n", - "-4.610432782432291e-14|-,0,0,+>\n", - "-8.830426060556265e-15|-,0,0,->\n", - "5.844601644003936e-15|-,0,+-,->\n", - "5.894893298081167e-15|-,+-,0,->\n", - "-3.9417154162086715e-16|-,+-,+-,+>\n", - "4.019797541940749e-16|-,+-,+-,->\n", - "Ending Cycle234\n", - "-8.847411999384074e-16|+,0,0,+>\n", - "-9.78169341295819e-15|+,0,0,->\n", - "4.216293844218855e-15|+,0,+-,+>\n", - "4.211153150536005e-15|+,+-,0,+>\n", - "-1.9696906564987978e-16|+,+-,+-,+>\n", - "1.991495460482047e-15|+,+-,+-,->\n", - "-4.642765863013288e-14|-,0,0,+>\n", - "-8.821251605430835e-15|-,0,0,->\n", - "5.806839742976852e-15|-,0,+-,->\n", - "5.9027465920958935e-15|-,+-,0,->\n", - "-4.958777687308784e-16|-,+-,+-,+>\n", - "4.758107914399241e-16|-,+-,+-,->\n", - "Ending Cycle235\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 86%|████████▌ | 239/278 [00:15<00:02, 16.26it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-1.0305672244578281e-15|+,0,0,+>\n", - "-1.005459066798185e-14|+,0,0,->\n", - "4.2195246259847914e-15|+,0,+-,+>\n", - "4.207488073295726e-15|+,+-,0,+>\n", - "-2.0612552810561054e-16|+,+-,+-,+>\n", - "2.0464024593431793e-15|+,+-,+-,->\n", - "-4.652035623413772e-14|-,0,0,+>\n", - "-8.797745125953577e-15|-,0,0,->\n", - "5.794441082507733e-15|-,0,+-,->\n", - "5.831142620946991e-15|-,+-,0,->\n", - "-5.357775063327875e-16|-,+-,+-,+>\n", - 
"5.676873545504466e-16|-,+-,+-,->\n", - "Ending Cycle236\n", - "-1.2451621531935848e-15|+,0,0,+>\n", - "-1.0328322316757636e-14|+,0,0,->\n", - "4.1662533258232e-15|+,0,+-,+>\n", - "4.196712232920755e-15|+,+-,0,+>\n", - "-1.9745213736076195e-16|+,+-,+-,+>\n", - "2.1466566002913937e-15|+,+-,+-,->\n", - "-4.661540429081419e-14|-,0,0,+>\n", - "-8.751319305041772e-15|-,0,0,->\n", - "5.773075795097504e-15|-,0,+-,->\n", - "5.85359374064894e-15|-,+-,0,->\n", - "-5.411276666289084e-16|-,+-,+-,+>\n", - "6.31759306328451e-16|-,+-,+-,->\n", - "Ending Cycle237\n", - "-1.451647760208406e-15|+,0,0,+>\n", - "-1.0567925814481262e-14|+,0,0,->\n", - "4.509894790865049e-15|+,0,+-,+>\n", - "4.412853562002988e-15|+,+-,0,+>\n", - "-1.9231638463394748e-16|+,+-,+-,+>\n", - "2.5109571824593455e-15|+,+-,+-,->\n", - "-4.677666105326871e-14|-,0,0,+>\n", - "-8.822499851126014e-15|-,0,0,->\n", - "5.752908270821656e-15|-,0,+-,->\n", - "5.788565822813711e-15|-,+-,0,->\n", - "-6.040273824249794e-16|-,+-,+-,+>\n", - "7.455800505031046e-16|-,+-,+-,->\n", - "Ending Cycle238\n", - "-1.7315221832861314e-15|+,0,0,+>\n", - "-1.082354359829184e-14|+,0,0,->\n", - "4.510097991837291e-15|+,0,+-,+>\n", - "4.479569829676878e-15|+,+-,0,+>\n", - "-1.9802956635672764e-16|+,+-,+-,+>\n", - "2.5293991294486906e-15|+,+-,+-,->\n", - "-4.678880069472012e-14|-,0,0,+>\n", - "-8.908916044983106e-15|-,0,0,->\n", - "6.081042037502995e-15|-,0,+-,->\n", - "6.164897040295023e-15|-,+-,0,->\n", - "-6.08993900182626e-16|-,+-,+-,+>\n", - "8.075923843864545e-16|-,+-,+-,->\n", - "Ending Cycle239\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 87%|████████▋ | 243/278 [00:15<00:02, 16.56it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-2.0070612671740858e-15|+,0,0,+>\n", - "-1.108353606174801e-14|+,0,0,->\n", - "4.490962539618241e-15|+,0,+-,+>\n", - "4.557989847735692e-15|+,+-,0,+>\n", - "-2.0380854332688207e-16|+,+-,+-,+>\n", - "2.5400582037922765e-15|+,+-,+-,->\n", - "-4.6912515132856376e-14|-,0,0,+>\n", - "-8.992975503101579e-15|-,0,0,->\n", - "6.038124507287931e-15|-,0,+-,->\n", - "6.160272607293785e-15|-,+-,0,->\n", - "-6.253205164162585e-16|-,+-,+-,+>\n", - "9.09629013803176e-16|-,+-,+-,->\n", - "Ending Cycle240\n", - "-2.333237546552545e-15|+,0,0,+>\n", - "-1.1400533741268918e-14|+,0,0,->\n", - "4.504712224789092e-15|+,0,+-,+>\n", - "4.577764650074571e-15|+,+-,0,+>\n", - "-2.0425256546365378e-16|+,+-,+-,+>\n", - "2.5245231116929954e-15|+,+-,+-,->\n", - "-4.6906678880088545e-14|-,0,0,+>\n", - "-8.869708268734989e-15|-,0,0,->\n", - "6.020089626075665e-15|-,0,+-,->\n", - "6.116821576191925e-15|-,+-,0,->\n", - "-6.294559718959491e-16|-,+-,+-,+>\n", - "1.043088139154699e-15|-,+-,+-,->\n", - "Ending Cycle241\n", - "-2.6835953770392107e-15|+,0,0,+>\n", - "-1.1743180468747014e-14|+,0,0,->\n", - "4.468459335803128e-15|+,0,+-,+>\n", - "4.549579872207757e-15|+,+-,0,+>\n", - "-1.9969094991503985e-16|+,+-,+-,+>\n", - "2.498574975643799e-15|+,+-,+-,->\n", - "-4.6952533688750206e-14|-,0,0,+>\n", - "-8.754879884307304e-15|-,0,0,->\n", - "6.0093469661079136e-15|-,0,+-,->\n", - "6.079268118022278e-15|-,+-,0,->\n", - "-6.010196945160915e-16|-,+-,+-,+>\n", - "1.190583583948611e-15|-,+-,+-,->\n", - "Ending Cycle242\n", - "-3.157733201508925e-15|+,0,0,+>\n", - "-1.2241500664147979e-14|+,0,0,->\n", - "4.4741634231040125e-15|+,0,+-,+>\n", - "4.5197372197654126e-15|+,+-,0,+>\n", - "-2.0079627797296534e-16|+,+-,+-,+>\n", - "2.4806016294938406e-15|+,+-,+-,->\n", - 
"-4.691630523318516e-14|-,0,0,+>\n", - "-8.639706323237062e-15|-,0,0,->\n", - "5.97423714061805e-15|-,0,+-,->\n", - "6.0263344582842585e-15|-,+-,0,->\n", - "-5.463278441402008e-16|-,+-,+-,+>\n", - "1.3609026635415263e-15|-,+-,+-,->\n", - "Ending Cycle243\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 89%|████████▉ | 247/278 [00:15<00:01, 16.36it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-3.624577320433589e-15|+,0,0,+>\n", - "-1.2486149239873653e-14|+,0,0,->\n", - "4.442481491807262e-15|+,0,+-,+>\n", - "4.514892969741783e-15|+,+-,0,+>\n", - "-2.0060543315974952e-16|+,+-,+-,+>\n", - "2.517282189661746e-15|+,+-,+-,->\n", - "-4.687585804380862e-14|-,0,0,+>\n", - "-8.494138821239594e-15|-,0,0,->\n", - "5.975845964300671e-15|-,0,+-,->\n", - "5.981720811246359e-15|-,+-,0,->\n", - "-5.198445656502423e-16|-,+-,+-,+>\n", - "1.6068883021686087e-15|-,+-,+-,->\n", - "Ending Cycle244\n", - "-4.168110503523065e-15|+,0,0,+>\n", - "-1.272002564122291e-14|+,0,0,->\n", - "4.403064657906123e-15|+,0,+-,+>\n", - "4.457001248028696e-15|+,+-,0,+>\n", - "-2.010650987041234e-16|+,+-,+-,+>\n", - "2.5309275018427203e-15|+,+-,+-,->\n", - "-4.7004564843007283e-14|-,0,0,+>\n", - "-8.331621698170732e-15|-,0,0,->\n", - "5.9559285131295934e-15|-,0,+-,->\n", - "5.9429884574595565e-15|-,+-,0,->\n", - "-5.788009656098313e-16|-,+-,+-,+>\n", - "1.831679404795516e-15|-,+-,+-,->\n", - "Ending Cycle245\n", - "-4.76934736666142e-15|+,0,0,+>\n", - "-1.2958458623274767e-14|+,0,0,->\n", - "4.3844638410221716e-15|+,0,+-,+>\n", - "4.6687354003441716e-15|+,+-,0,+>\n", - "-2.18695194536582e-16|+,+-,+-,+>\n", - "2.5670328749154524e-15|+,+-,+-,->\n", - "-4.7144321777777845e-14|-,0,0,+>\n", - "-8.406008771963258e-15|-,0,0,->\n", - "5.961411573785864e-15|-,0,+-,->\n", - "5.8943751084625135e-15|-,+-,0,->\n", - "-6.586155533340775e-16|-,+-,+-,+>\n", - "2.11388900020464e-15|-,+-,+-,->\n", - "Ending Cycle246\n", - "-5.498687428700235e-15|+,0,0,+>\n", - "-1.32293031810134e-14|+,0,0,->\n", - "4.761567702868114e-15|+,0,+-,+>\n", - "5.009378961351928e-15|+,+-,0,+>\n", - "-2.0315247982332383e-16|+,+-,+-,+>\n", - "2.566817803404933e-15|+,+-,+-,->\n", - "-4.7251880134506454e-14|-,0,0,+>\n", - "-8.480475986206876e-15|-,0,0,->\n", - "5.890463923041733e-15|-,0,+-,->\n", - "5.833431044018475e-15|-,+-,0,->\n", - "-6.88465434620018e-16|-,+-,+-,+>\n", - "2.4759249370656044e-15|-,+-,+-,->\n", - "Ending Cycle247\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 90%|█████████ | 251/278 [00:15<00:01, 16.44it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-6.378007977029086e-15|+,0,0,+>\n", - "-1.3517654533179348e-14|+,0,0,->\n", - "4.702752378834814e-15|+,0,+-,+>\n", - "4.955618442324046e-15|+,+-,0,+>\n", - "-2.134726510335776e-16|+,+-,+-,+>\n", - "2.550478917862708e-15|+,+-,+-,->\n", - "-4.733610808900658e-14|-,0,0,+>\n", - "-8.541860506732513e-15|-,0,0,->\n", - "6.304466991011663e-15|-,0,+-,->\n", - "6.139330412055529e-15|-,+-,0,->\n", - "-6.896345939752417e-16|-,+-,+-,+>\n", - "2.8753392994096315e-15|-,+-,+-,->\n", - "Ending Cycle248\n", - "-7.379379882658774e-15|+,0,0,+>\n", - "-1.3802514007894264e-14|+,0,0,->\n", - "4.667635544479246e-15|+,0,+-,+>\n", - "4.88533676802418e-15|+,+-,0,+>\n", - "-1.9998806534114874e-16|+,+-,+-,+>\n", - "2.5531356560261786e-15|+,+-,+-,->\n", - "-4.746980943602791e-14|-,0,0,+>\n", - "-8.615357087607992e-15|-,0,0,->\n", - 
"6.2462710890923006e-15|-,0,+-,->\n", - "6.095982247093125e-15|-,+-,0,->\n", - "-7.396240222623209e-16|-,+-,+-,+>\n", - "3.3389935928627332e-15|-,+-,+-,->\n", - "Ending Cycle249\n", - "-8.524811914078347e-15|+,0,0,+>\n", - "-1.403726816789085e-14|+,0,0,->\n", - "4.696923438350942e-15|+,0,+-,+>\n", - "4.9101745217747026e-15|+,+-,0,+>\n", - "-2.2076141107513336e-16|+,+-,+-,+>\n", - "2.5531553560334665e-15|+,+-,+-,->\n", - "-4.755917978599437e-14|-,0,0,+>\n", - "-8.693927200505117e-15|-,0,0,->\n", - "6.247262418911503e-15|-,0,+-,->\n", - "6.091796232063591e-15|-,+-,0,->\n", - "-7.887367017274953e-16|-,+-,+-,+>\n", - "3.8560773092978016e-15|-,+-,+-,->\n", - "Ending Cycle250\n", - "-9.855837929778837e-15|+,0,0,+>\n", - "-1.428345691904766e-14|+,0,0,->\n", - "4.631351226048384e-15|+,0,+-,+>\n", - "4.824399488276818e-15|+,+-,0,+>\n", - "-2.1971365312487608e-16|+,+-,+-,+>\n", - "2.617109698282967e-15|+,+-,+-,->\n", - "-4.759137866492296e-14|-,0,0,+>\n", - "-8.759853706434656e-15|-,0,0,->\n", - "6.205527637324835e-15|-,0,+-,->\n", - "6.039937706822452e-15|-,+-,0,->\n", - "-7.639076431130027e-16|-,+-,+-,+>\n", - "4.5084241026510965e-15|-,+-,+-,->\n", - "Ending Cycle251\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 92%|█████████▏| 255/278 [00:16<00:01, 16.61it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-1.1402233485096135e-14|+,0,0,+>\n", - "-1.4585172545143094e-14|+,0,0,->\n", - "4.615239143042809e-15|+,0,+-,+>\n", - "4.752771862729001e-15|+,+-,0,+>\n", - "-2.052374899711078e-16|+,+-,+-,+>\n", - "2.6188477037555664e-15|+,+-,+-,->\n", - "-4.762355648425366e-14|-,0,0,+>\n", - "-8.823705008621905e-15|-,0,0,->\n", - "6.181846235760318e-15|-,0,+-,->\n", - "5.96913716802986e-15|-,+-,0,->\n", - "-7.071082980773976e-16|-,+-,+-,+>\n", - "5.260930917906502e-15|-,+-,+-,->\n", - "Ending Cycle252\n", - "-1.3220856872608477e-14|+,0,0,+>\n", - "-1.4920980904432626e-14|+,0,0,->\n", - "4.519630315935217e-15|+,0,+-,+>\n", - "4.519529568144677e-15|+,+-,0,+>\n", - "-2.116825276343467e-16|+,+-,+-,+>\n", - "2.6265743033246133e-15|+,+-,+-,->\n", - "-4.765278631596955e-14|-,0,0,+>\n", - "-8.906779501837125e-15|-,0,0,->\n", - "6.132908196880787e-15|-,0,+-,->\n", - "5.995859190027067e-15|-,+-,0,->\n", - "-7.294174476471215e-16|-,+-,+-,+>\n", - "6.107302241488755e-15|-,+-,+-,->\n", - "Ending Cycle253\n", - "-1.5345108260821402e-14|+,0,0,+>\n", - "-1.52245456118771e-14|+,0,0,->\n", - "4.522982713948329e-15|+,0,+-,+>\n", - "4.5479002153231714e-15|+,+-,0,+>\n", - "-2.0934643819763244e-16|+,+-,+-,+>\n", - "2.6151699016697896e-15|+,+-,+-,->\n", - "-4.765160525613844e-14|-,0,0,+>\n", - "-8.96643839480924e-15|-,0,0,->\n", - "6.068771700071348e-15|-,0,+-,->\n", - "5.91672271706645e-15|-,+-,0,->\n", - "-7.3108730566011605e-16|-,+-,+-,+>\n", - "7.115995880826192e-15|-,+-,+-,->\n", - "Ending Cycle254\n", - "-1.785892158305053e-14|+,0,0,+>\n", - "-1.553099606901878e-14|+,0,0,->\n", - "4.479777290430323e-15|+,0,+-,+>\n", - "4.5521390430565505e-15|+,+-,0,+>\n", - "-2.128711542510694e-16|+,+-,+-,+>\n", - "2.593951662341299e-15|+,+-,+-,->\n", - "-4.7765464026760456e-14|-,0,0,+>\n", - "-9.041159806987233e-15|-,0,0,->\n", - "6.454870678367108e-15|-,0,+-,->\n", - "6.287694739147078e-15|-,+-,0,->\n", - "-7.636287241207083e-16|-,+-,+-,+>\n", - "8.277579048296694e-15|-,+-,+-,->\n", - "Ending Cycle255\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 93%|█████████▎| 259/278 
[00:16<00:01, 16.05it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-2.0766030241741462e-14|+,0,0,+>\n", - "-1.5811584248610464e-14|+,0,0,->\n", - "4.475234814908973e-15|+,0,+-,+>\n", - "4.572375040337059e-15|+,+-,0,+>\n", - "-1.9936567839171016e-16|+,+-,+-,+>\n", - "2.653319576344743e-15|+,+-,+-,->\n", - "-4.7876953209080496e-14|-,0,0,+>\n", - "-9.099560907471988e-15|-,0,0,->\n", - "6.2831830419414434e-15|-,0,+-,->\n", - "6.102470184267246e-15|-,+-,0,->\n", - "-7.638133029050709e-16|-,+-,+-,+>\n", - "9.645832580587082e-15|-,+-,+-,->\n", - "Ending Cycle256\n", - "-2.4182052830409664e-14|+,0,0,+>\n", - "-1.612950044372108e-14|+,0,0,->\n", - "4.485951909504535e-15|+,0,+-,+>\n", - "4.592347918921254e-15|+,+-,0,+>\n", - "-2.0382273175684442e-16|+,+-,+-,+>\n", - "2.7812693072404794e-15|+,+-,+-,->\n", - "-4.7975746331344814e-14|-,0,0,+>\n", - "-9.167548439194573e-15|-,0,0,->\n", - "6.298796517069326e-15|-,0,+-,->\n", - "6.090552392931173e-15|-,+-,0,->\n", - "-7.682232401092411e-16|-,+-,+-,+>\n", - "1.1281350161313192e-14|-,+-,+-,->\n", - "Ending Cycle257\n", - "-2.8165619564079037e-14|+,0,0,+>\n", - "-1.6441948213955836e-14|+,0,0,->\n", - "4.504233165729848e-15|+,0,+-,+>\n", - "4.64962033092143e-15|+,+-,0,+>\n", - "-1.8168312331138518e-16|+,+-,+-,+>\n", - "2.4470997864272063e-15|+,+-,+-,->\n", - "-4.805845986314925e-14|-,0,0,+>\n", - "-9.242388622410042e-15|-,0,0,->\n", - "6.333647567386689e-15|-,0,+-,->\n", - "6.046794738752166e-15|-,+-,0,->\n", - "-8.088328179191702e-16|-,+-,+-,+>\n", - "1.3168261688350955e-14|-,+-,+-,->\n", - "Ending Cycle258\n", - "-3.277682922944539e-14|+,0,0,+>\n", - "-1.676006915866141e-14|+,0,0,->\n", - "4.4749077610876455e-15|+,0,+-,+>\n", - "4.604296284394559e-15|+,+-,0,+>\n", - "-1.9813072833339266e-16|+,+-,+-,+>\n", - "2.491487230990639e-15|+,+-,+-,->\n", - "-4.8069551904159145e-14|-,0,0,+>\n", - "-9.302621977663345e-15|-,0,0,->\n", - "6.322482630353419e-15|-,0,+-,->\n", - "6.093510937517517e-15|-,+-,0,->\n", - "-7.734395542710897e-16|-,+-,+-,+>\n", - "1.5406289342787774e-14|-,+-,+-,->\n", - "Ending Cycle259\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 95%|█████████▍| 263/278 [00:16<00:00, 16.26it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-3.821314628881924e-14|+,0,0,+>\n", - "-1.702829059056966e-14|+,0,0,->\n", - "4.538419819383059e-15|+,0,+-,+>\n", - "4.579088413333334e-15|+,+-,0,+>\n", - "-1.8286201559054037e-16|+,+-,+-,+>\n", - "2.537886655986334e-15|+,+-,+-,->\n", - "-4.8135589975119973e-14|-,0,0,+>\n", - "-9.27358405109401e-15|-,0,0,->\n", - "6.277847817794939e-15|-,0,+-,->\n", - "6.054545083916225e-15|-,+-,0,->\n", - "-8.001277347226789e-16|-,+-,+-,+>\n", - "1.7995445614706218e-14|-,+-,+-,->\n", - "Ending Cycle260\n", - "-4.459400417891473e-14|+,0,0,+>\n", - "-1.763930774395474e-14|+,0,0,->\n", - "4.550885957162425e-15|+,0,+-,+>\n", - "4.563835395225732e-15|+,+-,0,+>\n", - "-1.845642340363288e-16|+,+-,+-,+>\n", - "2.55725495258813e-15|+,+-,+-,->\n", - "-4.8293159263317555e-14|-,0,0,+>\n", - "-9.123725064165149e-15|-,0,0,->\n", - "6.232980897827794e-15|-,0,+-,->\n", - "6.0637516466489414e-15|-,+-,0,->\n", - "-7.942468215520856e-16|-,+-,+-,+>\n", - "2.1056915684647833e-14|-,+-,+-,->\n", - "Ending Cycle261\n", - "-5.202804182365162e-14|+,0,0,+>\n", - "-1.8211710345037748e-14|+,0,0,->\n", - "4.568876164250348e-15|+,0,+-,+>\n", - "4.799222873993752e-15|+,+-,0,+>\n", - "-1.6425626143896883e-16|+,+-,+-,+>\n", - 
"2.5841994864041106e-15|+,+-,+-,->\n", - "-4.8350836378084874e-14|-,0,0,+>\n", - "-8.110703917112836e-15|-,0,0,->\n", - "6.155881367430608e-15|-,0,+-,->\n", - "6.041175975079688e-15|-,+-,0,->\n", - "-8.338963640125815e-16|-,+-,+-,+>\n", - "2.466137616646767e-14|-,+-,+-,->\n", - "Ending Cycle262\n", - "-6.071086118468785e-14|+,0,0,+>\n", - "-1.8563942364395185e-14|+,0,0,->\n", - "4.537910627685358e-15|+,0,+-,+>\n", - "4.721840444744152e-15|+,+-,0,+>\n", - "-4.199612593714837e-16|+,+-,+-,+>\n", - "2.5922276968483515e-15|+,+-,+-,->\n", - "-4.839378806961348e-14|-,0,0,+>\n", - "-8.179064897896921e-15|-,0,0,->\n", - "6.076888566361757e-15|-,0,+-,->\n", - "6.0378507507708945e-15|-,+-,0,->\n", - "-8.870475862990372e-16|-,+-,+-,+>\n", - "2.8909259198675655e-14|-,+-,+-,->\n", - "Ending Cycle263\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 96%|█████████▌| 267/278 [00:16<00:00, 16.19it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-7.098596660009963e-14|+,0,0,+>\n", - "-1.8897056469923627e-14|+,0,0,->\n", - "4.58015053655003e-15|+,0,+-,+>\n", - "4.629010309336211e-15|+,+-,0,+>\n", - "-5.649589074237941e-16|+,+-,+-,+>\n", - "2.6173019989063258e-15|+,+-,+-,->\n", - "-4.847392493370124e-14|-,0,0,+>\n", - "-7.208202805714444e-15|-,0,0,->\n", - "6.0756591174137155e-15|-,0,+-,->\n", - "5.984967623277023e-15|-,+-,0,->\n", - "-8.812479904185068e-16|-,+-,+-,+>\n", - "3.394072285667301e-14|-,+-,+-,->\n", - "Ending Cycle264\n", - "-8.309302794843106e-14|+,0,0,+>\n", - "-1.9202358725732136e-14|+,0,0,->\n", - "4.572867448127816e-15|+,0,+-,+>\n", - "4.583092502474754e-15|+,+-,0,+>\n", - "-6.009164878449746e-16|+,+-,+-,+>\n", - "2.661447627375303e-15|+,+-,+-,->\n", - "-4.84980691595412e-14|-,0,0,+>\n", - "-6.640408663313066e-15|-,0,0,->\n", - "5.944118630256751e-15|-,0,+-,->\n", - "5.991942115322274e-15|-,+-,0,->\n", - "-8.500553621652629e-16|-,+-,+-,+>\n", - "3.987980706764818e-14|-,+-,+-,->\n", - "Ending Cycle265\n", - "-9.72461875445579e-14|+,0,0,+>\n", - "-1.9494866742479534e-14|+,0,0,->\n", - "4.590505077285537e-15|+,0,+-,+>\n", - "4.517613644576712e-15|+,+-,0,+>\n", - "-6.63871655891987e-16|+,+-,+-,+>\n", - "2.7168297643609604e-15|+,+-,+-,->\n", - "-4.846387505722449e-14|-,0,0,+>\n", - "-6.2202195459244606e-15|-,0,0,->\n", - "5.8790541706258925e-15|-,0,+-,->\n", - "5.931682740472367e-15|-,+-,0,->\n", - "-8.59798783256941e-16|-,+-,+-,+>\n", - "4.6895463600742486e-14|-,+-,+-,->\n", - "Ending Cycle266\n", - "-1.140247901523153e-13|+,0,0,+>\n", - "-1.9813205886131488e-14|+,0,0,->\n", - "5.08141057149792e-15|+,0,+-,+>\n", - "4.896396475126728e-15|+,+-,0,+>\n", - "-6.83692991128412e-16|+,+-,+-,+>\n", - "2.759809655045178e-15|+,+-,+-,->\n", - "-4.849617610488178e-14|-,0,0,+>\n", - "-6.02477950000035e-15|-,0,0,->\n", - "5.812756516442215e-15|-,0,+-,->\n", - "5.9280409859963856e-15|-,+-,0,->\n", - "-8.935401958272332e-16|-,+-,+-,+>\n", - "5.515996572480348e-14|-,+-,+-,->\n", - "Ending Cycle267\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 97%|█████████▋| 271/278 [00:17<00:00, 16.17it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-1.3378430643653647e-13|+,0,0,+>\n", - "-2.015749846905298e-14|+,0,0,->\n", - "5.0125218262836614e-15|+,0,+-,+>\n", - "4.863700702959917e-15|+,+-,0,+>\n", - "-6.7700018680074415e-16|+,+-,+-,+>\n", - "2.758329822623203e-15|+,+-,+-,->\n", - "-4.865194758634103e-14|-,0,0,+>\n", - 
"-5.906361898972965e-15|-,0,0,->\n", - "6.275657996387363e-15|-,0,+-,->\n", - "6.321070315267393e-15|-,+-,0,->\n", - "-1.0065952176480827e-15|-,+-,+-,+>\n", - "6.498507857648169e-14|-,+-,+-,->\n", - "Ending Cycle268\n", - "-1.570795238673764e-13|+,0,0,+>\n", - "-2.0498805334188703e-14|+,0,0,->\n", - "5.067861181132533e-15|+,0,+-,+>\n", - "4.763217759748483e-15|+,+-,0,+>\n", - "-6.437254118506119e-16|+,+-,+-,+>\n", - "2.774294458766728e-15|+,+-,+-,->\n", - "-4.874588817761217e-14|-,0,0,+>\n", - "-5.811965282992906e-15|-,0,0,->\n", - "6.2362948532812395e-15|-,0,+-,->\n", - "6.289232262610799e-15|-,+-,0,->\n", - "-1.0836604710654567e-15|-,+-,+-,+>\n", - "7.659768805176042e-14|-,+-,+-,->\n", - "Ending Cycle269\n", - "-1.8468607698774005e-13|+,0,0,+>\n", - "-2.0793147605456513e-14|+,0,0,->\n", - "5.044257042276899e-15|+,0,+-,+>\n", - "4.732346036152309e-15|+,+-,0,+>\n", - "-6.015648201162365e-16|+,+-,+-,+>\n", - "2.8229311468488636e-15|+,+-,+-,->\n", - "-4.8816332578422923e-14|-,0,0,+>\n", - "-5.836128721804725e-15|-,0,0,->\n", - "6.10993047372714e-15|-,0,+-,->\n", - "6.292660936747321e-15|-,+-,0,->\n", - "-1.0541481466961505e-15|-,+-,+-,+>\n", - "9.042414512240343e-14|-,+-,+-,->\n", - "Ending Cycle270\n", - "-2.173410340651252e-13|+,0,0,+>\n", - "-2.114019710901895e-14|+,0,0,->\n", - "5.006808498834495e-15|+,0,+-,+>\n", - "4.701792546683826e-15|+,+-,0,+>\n", - "-6.085174782804504e-16|+,+-,+-,+>\n", - "2.8521710381212305e-15|+,+-,+-,->\n", - "-4.870562115915574e-14|-,0,0,+>\n", - "-5.85500226726638e-15|-,0,0,->\n", - "6.083205939450668e-15|-,0,+-,->\n", - "6.3201547107664934e-15|-,+-,0,->\n", - "-1.0487354031049536e-15|-,+-,+-,+>\n", - "1.068012268248858e-13|-,+-,+-,->\n", - "Ending Cycle271\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=200, tau=0.01, energy~-1.859368: 99%|█████████▉| 275/278 [00:17<00:00, 16.36it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-2.559694806962366e-13|+,0,0,+>\n", - "-2.1479559499368054e-14|+,0,0,->\n", - "4.948001239749996e-15|+,0,+-,+>\n", - "4.804104837543364e-15|+,+-,0,+>\n", - "-1.772296527976595e-15|+,+-,+-,+>\n", - "2.8497016417996255e-15|+,+-,+-,->\n", - "-4.882687514821802e-14|-,0,0,+>\n", - "-5.866745816335311e-15|-,0,0,->\n", - "5.942972194242288e-15|-,0,+-,->\n", - "6.27193593981377e-15|-,+-,0,->\n", - "-1.0800668114369425e-15|-,+-,+-,+>\n", - "1.2620475242459532e-13|-,+-,+-,->\n", - "Ending Cycle272\n", - "-3.017618402580019e-13|+,0,0,+>\n", - "-2.1851471782697118e-14|+,0,0,->\n", - "4.785144891867075e-15|+,0,+-,+>\n", - "4.791942878829826e-15|+,+-,0,+>\n", - "-2.525598398979487e-15|+,+-,+-,+>\n", - "2.854800957734322e-15|+,+-,+-,->\n", - "-4.8865384160565357e-14|-,0,0,+>\n", - "-5.861918045096043e-15|-,0,0,->\n", - "6.031624725851295e-15|-,0,+-,->\n", - "6.229759280822457e-15|-,+-,0,->\n", - "-1.0586267755655922e-15|-,+-,+-,+>\n", - "1.4931419686691696e-13|-,+-,+-,->\n", - "Ending Cycle273\n", - "-3.5605746849475526e-13|+,0,0,+>\n", - "-2.219732234553515e-14|+,0,0,->\n", - "4.803636388727251e-15|+,0,+-,+>\n", - "4.6950078519311766e-15|+,+-,0,+>\n", - "-2.8936924478998853e-15|+,+-,+-,+>\n", - "2.8702515639333872e-15|+,+-,+-,->\n", - "-4.904845182647273e-14|-,0,0,+>\n", - "-5.902145231271085e-15|-,0,0,->\n", - "6.019465148144255e-15|-,0,+-,->\n", - "6.163294679079051e-15|-,+-,0,->\n", - "-1.1189401781784132e-15|-,+-,+-,+>\n", - "1.7683020306052258e-13|-,+-,+-,->\n", - "Ending Cycle274\n", - "-4.2049617614625633e-13|+,0,0,+>\n", - "-2.2542204845917815e-14|+,0,0,->\n", - 
"4.804476192067437e-15|+,0,+-,+>\n", - "4.598238986697228e-15|+,+-,0,+>\n", - "-3.0915494024086227e-15|+,+-,+-,+>\n", - "2.88492189045346e-15|+,+-,+-,->\n", - "-4.894569538406984e-14|-,0,0,+>\n", - "-5.97303545978269e-15|-,0,0,->\n", - "5.975710177269448e-15|-,0,+-,->\n", - "6.125371063597666e-15|-,+-,0,->\n", - "-1.1240279139875316e-15|-,+-,+-,+>\n", - "2.0953294810282257e-13|-,+-,+-,->\n", - "Ending Cycle275\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\r", - "n=200, tau=0.01, energy~-1.859368: 100%|█████████▉| 277/278 [00:17<00:00, 16.34it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-4.970630886439492e-13|+,0,0,+>\n", - "-2.2899176516771412e-14|+,0,0,->\n", - "4.764000256724111e-15|+,0,+-,+>\n", - "4.557478413685605e-15|+,+-,0,+>\n", - "-3.1509527855583506e-15|+,+-,+-,+>\n", - "2.9387665536932204e-15|+,+-,+-,->\n", - "-4.8919356438333355e-14|-,0,0,+>\n", - "-6.367766432596705e-15|-,0,0,->\n", - "5.964380392703633e-15|-,0,+-,->\n", - "6.069855148354943e-15|-,+-,0,->\n", - "-1.1092144232605145e-15|-,+-,+-,+>\n", - "2.4849798904831247e-13|-,+-,+-,->\n", - "Ending Cycle276\n", - "-5.880785748200001e-13|+,0,0,+>\n", - "-2.3266726470968755e-14|+,0,0,->\n", - "4.706854614670875e-15|+,0,+-,+>\n", - "4.6190369119391116e-15|+,+-,0,+>\n", - "-3.106230227801349e-15|+,+-,+-,+>\n", - "2.9435512270576704e-15|+,+-,+-,->\n", - "-4.8893829703476184e-14|-,0,0,+>\n", - "-6.485976264868538e-15|-,0,0,->\n", - "6.368177278100742e-15|-,0,+-,->\n", - "6.481473619627083e-15|-,+-,0,->\n", - "-1.0687068000880715e-15|-,+-,+-,+>\n", - "2.949747148006921e-13|-,+-,+-,->\n", - "Ending Cycle277\n", - "-6.963014796187964e-13|+,0,0,+>\n", - "-2.3625832428729458e-14|+,0,0,->\n", - "4.728179338640727e-15|+,0,+-,+>\n", - "4.416285621726505e-15|+,+-,0,+>\n", - "-2.996260173086741e-15|+,+-,+-,+>\n", - "2.7307376925077217e-15|+,+-,+-,->\n", - "-4.892304921331221e-14|-,0,0,+>\n", - "-6.5941769150165045e-15|-,0,0,->\n", - "6.182014153190545e-15|-,0,+-,->\n", - "6.325755682777613e-15|-,+-,0,->\n", - "-1.0539160374646244e-15|-,+-,+-,+>\n", - "3.504128336606458e-13|-,+-,+-,->\n", - "Ending Cycle278\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=278, tau=0.01, energy~-1.900219: 100%|██████████| 278/278 [00:17<00:00, 15.74it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(2, 2)\n", - "(0, 0): SZ=0.21, N=1.00\n", - "(1, 0): SZ=-0.14, N=1.00\n", - "(2, 0): SZ=0.14, N=1.00\n", - "(3, 0): SZ=-0.21, N=1.00\n", - "0.9999928215228403\n" - ] - } - ], - "source": [ - "from pyblock3.algebra.fermion import SparseFermionTensor\n", - "np.random.seed(3)\n", - "\n", - " \n", - "def callback(su):\n", - " psi1 = su.get_state()\n", - " #for itsr in psi1:\n", - " # itsr.data.data[abs(itsr.data.data)<1e-9] =0\n", - " state = psi1.contract(all)\n", - " tsr = state.data.to_sparse()\n", - " \n", - " need_print = False\n", - " for blk in tsr.blocks:\n", - " need_print = need_print or print_block(blk)\n", - " if need_print:\n", - " print(\"Ending Cycle%i\"%su._n)\n", - "\n", - " \n", - "su = SimpleUpdate(\n", - " psi,\n", - " Ham,\n", - " chi=128, # boundary contraction bond dim for computing energy\n", - " D = 4,\n", - " compute_energy_every=100,\n", - " compute_energy_per_site=False,\n", - " keep_best=True,\n", - " ordering = 'sort',\n", - " gauge_smudge = 1e-6,\n", - " callback=callback#,\n", - " #gate_opts = {'cutoff': 1e-6}\n", - ")\n", - "tau = 0.01\n", - "su.evolve(278, tau=tau)\n", - "\n", - "second_dense = min(Lx, 
Ly)>1\n", - "sz_expecs = su.get_state().compute_local_expectation(sz_ops, return_all=True, second_dense=second_dense,normalized=True)\n", - "n_expecs = su.get_state().compute_local_expectation(n_ops, return_all=True, second_dense=second_dense,normalized=True)\n", - "\n", - "print(su.get_state()[0,0].shape)\n", - "for ix, iy in itertools.product(range(Lx), range(Ly)):\n", - " print(\"(%i, %i): SZ=%.2f, N=%.2f\"%(ix,iy,sz_expecs[(ix,iy)][0]/sz_expecs[(ix,iy)][1], n_expecs[(ix,iy)][0]/n_expecs[(ix,iy)][1]))\n", - "\n", - "norm = su.get_state().compute_norm()\n", - "print(norm)" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=0, tau=0.01, energy~-0.115293: 100%|██████████| 1/1 [00:00<00:00, 7.27it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[-0.09899956 0. 0. -0.09949088 -1.00486561 0.\n", - " 0. 0.00999741]\n", - "[ 9.90067826e-02 0.00000000e+00 0.00000000e+00 0.00000000e+00\n", - " 1.66397683e-21 9.90018325e-02 0.00000000e+00 0.00000000e+00\n", - " 0.00000000e+00 0.00000000e+00 -2.16058167e-17 9.82587235e-04\n", - " 0.00000000e+00 0.00000000e+00 -9.87587018e-04 2.65248113e-17\n", - " 0.00000000e+00 9.80046210e-02 9.85057004e-02 0.00000000e+00\n", - " 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n", - " 0.00000000e+00 -6.97948764e-04 6.94547727e-04 0.00000000e+00\n", - " -9.85873202e-04 -1.31157586e-19 1.31863282e-19 -9.39316310e-20\n", - " 9.84958503e-03 0.00000000e+00 0.00000000e+00 0.00000000e+00\n", - " -2.04271564e-18 0.00000000e+00 -9.89944913e-03 0.00000000e+00\n", - " 0.00000000e+00 2.08552111e-24 0.00000000e+00 9.87491063e-03\n", - " 0.00000000e+00 9.87467063e-03 0.00000000e+00 -2.34018531e-20\n", - " 0.00000000e+00 -9.95013534e-01 -9.90032584e-03 0.00000000e+00\n", - " -9.91675546e-03 0.00000000e+00 0.00000000e+00 0.00000000e+00\n", - " 0.00000000e+00 6.78626029e-09 7.00357228e-07 -1.24233787e-18\n", - " 0.00000000e+00 2.58073620e-24 7.95246666e-24 7.62729032e-19]\n", - "[ 0.00000000e+00 -9.90018319e-02 -9.90067821e-02 2.48196916e-17\n", - " 1.22978667e-21 -2.29033301e-33 -2.67931173e-24 4.98980748e-36\n", - " 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n", - " -9.87634268e-04 2.62446621e-21 8.07043985e-20 9.82634246e-04\n", - " 9.95013757e-01 -2.20947946e-16 0.00000000e+00 -9.91674876e-03\n", - " 6.80295293e-09 -2.07660221e-24 -2.55214709e-26 7.42332876e-42\n", - " 0.00000000e+00 -9.90009365e-03 0.00000000e+00 0.00000000e+00\n", - " 5.63779624e-19 -7.02062981e-07 4.92449456e-19 2.54118657e-20\n", - " 0.00000000e+00 0.00000000e+00 0.00000000e+00 9.84982920e-03\n", - " 9.87562176e-03 2.59254673e-21 -2.45959139e-20 -2.77423702e-21\n", - " 0.00000000e+00 -9.89896655e-03 0.00000000e+00 -1.69238397e-17\n", - " 2.44302787e-20 2.33566701e-30 9.87513553e-03 -5.08866652e-33\n", - " 0.00000000e+00 9.80070731e-02 0.00000000e+00 0.00000000e+00\n", - " 0.00000000e+00 6.99682736e-04 -1.37078975e-19 3.77945883e-24\n", - " 9.85009209e-02 0.00000000e+00 0.00000000e+00 0.00000000e+00\n", - " -6.96222049e-04 -2.76805727e-23 4.88750275e-23 -9.85780241e-04]\n", - "[-0.09899709 0. 0. -0.09949571 0. 1.00486538\n", - " 0.00999764 0. 
]\n", - "-1.4800135514959399e-18|-,0,0,+>\n", - "-1.1289268594434865e-19|-,+-,+-,+>\n", - "Ending Cycle1\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=1, tau=0.01, energy~-0.221545: 100%|██████████| 1/1 [00:00<00:00, 3.90it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(2, 2)\n", - "(0, 0): SZ=0.50, N=1.00\n", - "(1, 0): SZ=-0.50, N=1.00\n", - "(2, 0): SZ=0.50, N=1.00\n", - "(3, 0): SZ=-0.50, N=1.00\n", - "0.9999958480439124\n" - ] - } - ], - "source": [ - "psi1 = su.get_state()\n", - "\n", - "for itsr in psi1:\n", - " print(itsr.data.data)\n", - "\n", - "su1 = SimpleUpdate(\n", - " psi1,\n", - " Ham,\n", - " chi=128, # boundary contraction bond dim for computing energy\n", - " D = 4,\n", - " compute_energy_every=100,\n", - " compute_energy_per_site=False,\n", - " keep_best=True,\n", - " ordering = 'sort',\n", - " gauge_smudge = 1e-6,\n", - " callback=callback#,\n", - " #gate_opts = {'cutoff': 1e-6}\n", - ")\n", - "tau = 0.01\n", - "su1.evolve(1, tau=tau)\n", - "\n", - "second_dense = min(Lx, Ly)>1\n", - "sz_expecs = su1.get_state().compute_local_expectation(sz_ops, return_all=True, second_dense=second_dense,normalized=True)\n", - "n_expecs = su1.get_state().compute_local_expectation(n_ops, return_all=True, second_dense=second_dense,normalized=True)\n", - "\n", - "print(su1.get_state()[0,0].shape)\n", - "for ix, iy in itertools.product(range(Lx), range(Ly)):\n", - " print(\"(%i, %i): SZ=%.2f, N=%.2f\"%(ix,iy,sz_expecs[(ix,iy)][0]/sz_expecs[(ix,iy)][1], n_expecs[(ix,iy)][0]/n_expecs[(ix,iy)][1]))\n", - "\n", - "norm = su1.get_state().compute_norm()\n", - "print(norm)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "psi1 = su.get_state()\n", - "state = psi1.contract(all)\n", - "#state.data.data[abs(state.data.data)<1e-10] = 0\n", - "tsr = state.data.to_sparse()\n", - "for iblk in tsr:\n", - " print_block(iblk)\n", - "\n", - "norm = psi1.compute_norm()\n", - "\n", - "for where, G in n_ops.items():\n", - " ket = psi1.copy()\n", - " ket.add_tag(\"KET\")\n", - " bra = ket.H\n", - " bra.retag({\"KET\": \"BRA\"})\n", - " bra.mangle_inner_(\"*\")\n", - " ket.gate_(G, where, contract=True)\n", - " tn = ket & bra\n", - " out = tn.contract(all)\n", - " print(where, out/norm)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/examples/debug.py b/docs/examples/debug.py deleted file mode 100644 index ed388e0d..00000000 --- a/docs/examples/debug.py +++ /dev/null @@ -1,105 +0,0 @@ -import numpy as np -import itertools -from quimb.tensor.fermion_2d_tebd import Hubbard2D, SimpleUpdate -from pyblock3.algebra import fermion_operators as ops - -t=1 -u=4 -Lx = 2 -Ly = 2 -mu = -0.9 -mu = 0 -Ham = Hubbard2D(t, u, Lx, Ly, mu=mu) -#efci = -5.702748483462062 - -state_array = np.zeros([Lx,Ly]) -#state_array[0,0] = state_array[2,0] = 1 -#`state_array[1,0] = state_array[3,0] = 2 - -state_array[0,0] = state_array[1,1] = 1 -state_array[0,1] = state_array[1,0] = 2 -from quimb.tensor.fermion_2d import gen_mf_peps - -psi = gen_mf_peps(state_array) # this is now a 2d mean field PEPS - - -sz = 
ops.measure_sz() -nop = ops.count_n() - -sz_ops = {(ix,iy): sz for ix, iy in itertools.product(range(Lx), range(Ly))} -n_ops = {(ix,iy): nop for ix, iy in itertools.product(range(Lx), range(Ly))} - - -book = {(0,0):"0", (0,1):"+-", (1,0):"+", (1,1):"-"} - -def print_block(blk): - qlab= [iq.n for iq in blk.q_labels] - ind = np.where(abs(np.asarray(blk)) > 1e-20) - need_print = False - for ixs in zip(*ind): - val = np.asarray(blk)[ixs] - desc = "|" - for ix, s in enumerate(ixs): - desc += book[(qlab[ix], s)] - if ix != len(ixs)-1: - desc += "," - else: - desc += ">" - - - if (desc.count("+"), desc.count("-")) != (2,2): - desc = str(val) + desc - print(desc) - need_print = True - return need_print - - -ket = psi.contract(all) -tsr = ket.data.to_sparse() -for iblk in tsr: - print(np.asarray(iblk)) - -from pyblock3.algebra.fermion import SparseFermionTensor -np.random.seed(3) - - -def callback(su): - psi1 = su.get_state() - #for itsr in psi1: - # itsr.data.data[abs(itsr.data.data)<1e-9] =0 - state = psi1.contract(all) - tsr = state.data.to_sparse() - - need_print = False - for blk in tsr.blocks: - need_print = need_print or print_block(blk) - if need_print: - print("Ending Cycle%i"%su._n) - - -su = SimpleUpdate( - psi, - Ham, - chi=128, # boundary contraction bond dim for computing energy - D = 4, - compute_energy_every=100, - compute_energy_per_site=False, - keep_best=True, - ordering = 'sort', - gauge_smudge = 1e-6, - callback=callback#, - #gate_opts = {'cutoff': 1e-6} -) -tau = 0.01 -su.evolve(2, tau=tau) - -second_dense = min(Lx, Ly)>1 -sz_expecs = su.get_state().compute_local_expectation(sz_ops, return_all=True, second_dense=second_dense,normalized=True) -n_expecs = su.get_state().compute_local_expectation(n_ops, return_all=True, second_dense=second_dense,normalized=True) - -print(su.get_state()[0,0].shape) -for ix, iy in itertools.product(range(Lx), range(Ly)): - print("(%i, %i): SZ=%.2f, N=%.2f"%(ix,iy,sz_expecs[(ix,iy)][0]/sz_expecs[(ix,iy)][1], n_expecs[(ix,iy)][0]/n_expecs[(ix,iy)][1])) - -norm = su.get_state().compute_norm() -print(norm) diff --git a/docs/examples/ex_fermion2d.ipynb b/docs/examples/ex_fermion2d.ipynb deleted file mode 100644 index c20c4112..00000000 --- a/docs/examples/ex_fermion2d.ipynb +++ /dev/null @@ -1,433 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "2D Hubbard Model Example\n", - "========" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This is an example of how to run SimpleUpdate for 2D Fermionic PEPS.\n", - "We'll define a 2D Hubbard Hamiltonian first:" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import itertools\n", - "from quimb.tensor.fermion_2d_tebd import Hubbard2D, SimpleUpdate\n", - "from pyblock3.algebra import fermion_operators as ops\n", - "\n", - "t=1\n", - "u=4\n", - "Lx = Ly = 2\n", - "mu = -0.9\n", - "Ham = Hubbard2D(t, u, Lx, Ly, mu=mu)\n", - "efci = -5.702748483462062\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we'll generate some initial guess to begin our SimpleUpdate. In quimb, we can use an integer array with shape (Lx, Ly) to generate a mean field inital guess. 
The integer value encodes the spin state for that site, specifically:\n", - "\n", - "0: |vac> \n", - "1: |+> \n", - "2: |-> \n", - "3: |-+> " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we use a mean field AFM lattice as initial guess\n" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [], - "source": [ - "state_array = np.zeros([Lx,Ly])\n", - "state_array[0,0] = state_array[1,1] = 1 # up spin at (0,0) and (1,1)\n", - "state_array[0,1] = state_array[1,0] = 2 # down spin at (0,1) and (1,0)" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.0\n" - ] - } - ], - "source": [ - "from quimb.tensor.fermion_2d import gen_mf_peps\n", - "\n", - "psi = gen_mf_peps(state_array) # this is now a 2d mean field PEPS\n", - "norm = psi.compute_norm()\n", - "print(norm)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can compute the Sz at each site to make sure it's a fully AFM state" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(0, 0): SZ=0.50, N=1\n", - "(0, 1): SZ=-0.50, N=1\n", - "(1, 0): SZ=-0.50, N=1\n", - "(1, 1): SZ=0.50, N=1\n" - ] - } - ], - "source": [ - "sz = ops.measure_sz()\n", - "nop = ops.count_n()\n", - "\n", - "sz_ops = {(ix,iy): sz for ix, iy in itertools.product(range(Lx), range(Ly))}\n", - "n_ops = {(ix,iy): nop for ix, iy in itertools.product(range(Lx), range(Ly))}\n", - "\n", - "sz_expecs = psi.compute_local_expectation(sz_ops, return_all=True, normalized=True)\n", - "n_expecs = psi.compute_local_expectation(n_ops, return_all=True, normalized=True)\n", - "\n", - "for ix, iy in itertools.product(range(Lx), range(Ly)):\n", - " print(\"(%i, %i): SZ=%.2f, N=%i\"%(ix,iy,sz_expecs[(ix,iy)][0], n_expecs[(ix,iy)][0]))\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we construct our SimpleUpdate Object" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=6000, tau=0.001, energy~-5.701882: 100%|██████████| 6000/6000 [11:17<00:00, 8.85it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(0, 0): SZ=0.02, N=1.13\n", - "(0, 1): SZ=-0.02, N=1.13\n", - "(1, 0): SZ=-0.02, N=1.13\n", - "(1, 1): SZ=0.02, N=1.13\n" - ] - } - ], - "source": [ - "su = SimpleUpdate(\n", - " psi,\n", - " Ham,\n", - " chi=128, # boundary contraction bond dim for computing energy\n", - " D = 12,\n", - " compute_energy_every=100,\n", - " compute_energy_per_site=False,\n", - " keep_best=True,\n", - " ordering = 'random',\n", - " gauge_smudge = 1e-6,\n", - " callback=None\n", - ")\n", - "tau = 0.001\n", - "su.evolve(6000, tau=tau)\n", - "\n", - "sz_expecs = su.get_state().compute_local_expectation(sz_ops, return_all=True, normalized=True)\n", - "n_expecs = su.get_state().compute_local_expectation(n_ops, return_all=True, normalized=True)\n", - "\n", - "for ix, iy in itertools.product(range(Lx), range(Ly)):\n", - " print(\"(%i, %i): SZ=%.2f, N=%.2f\"%(ix,iy,sz_expecs[(ix,iy)][0], n_expecs[(ix,iy)][0]))" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=6000, tau=0.001, energy~-5.701889: 100%|██████████| 6000/6000 
[11:08<00:00, 8.98it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(0, 0): SZ=-0.01, N=1.08\n", - "(0, 0): SZ=-0.01, N=1.08\n", - "(0, 1): SZ=0.01, N=1.08\n", - "(0, 1): SZ=0.01, N=1.08\n", - "(1, 0): SZ=0.01, N=1.08\n", - "(1, 0): SZ=0.01, N=1.08\n", - "(1, 1): SZ=-0.01, N=1.08\n", - "(1, 1): SZ=-0.01, N=1.08\n" - ] - } - ], - "source": [ - "su1 = SimpleUpdate(\n", - " psi,\n", - " Ham,\n", - " chi=128, # boundary contraction bond dim for computing energy\n", - " D = 16,\n", - " compute_energy_every=100,\n", - " compute_energy_per_site=False,\n", - " keep_best=True,\n", - " ordering = 'random',\n", - " gauge_smudge = 1e-6,\n", - " callback=None\n", - ")\n", - "tau = 0.001\n", - "su1.evolve(6000, tau=tau)\n", - "\n", - "sz_expecs = su1.get_state().compute_local_expectation(sz_ops, return_all=True, normalized=True)\n", - "n_expecs = su1.get_state().compute_local_expectation(n_ops, return_all=True, normalized=True)\n", - "\n", - "for ix, iy in itertools.product(range(Lx), range(Ly)):\n", - " print(\"(%i, %i): SZ=%.2f, N=%.2f\"%(ix,iy,sz_expecs[(ix,iy)][0], n_expecs[(ix,iy)][0]))\n", - " print(\"(%i, %i): SZ=%.2f, N=%.2f\"%(ix,iy,sz_expecs[(ix,iy)][0], n_expecs[(ix,iy)][0]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Unfortunately, PEPS uses approximate contraction and certain symmetry in the initial guess may not be preserved" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=6000, tau=0.001, energy~-5.699187: 100%|██████████| 6000/6000 [13:25<00:00, 7.45it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(0, 0): SZ=0.06, N=1.01\n", - "(0, 1): SZ=-0.06, N=1.01\n", - "(1, 0): SZ=-0.06, N=1.01\n", - "(1, 1): SZ=0.06, N=1.01\n" - ] - } - ], - "source": [ - "su2 = SimpleUpdate(\n", - " psi,\n", - " Ham,\n", - " chi=128, # boundary contraction bond dim for computing energy\n", - " D = 24,\n", - " compute_energy_every=100,\n", - " compute_energy_per_site=False,\n", - " keep_best=True,\n", - " ordering = 'random',\n", - " gauge_smudge = 1e-6,\n", - " callback=None\n", - ")\n", - "tau = 0.001\n", - "su2.evolve(6000, tau=tau)\n", - "\n", - "sz_expecs = su2.get_state().compute_local_expectation(sz_ops, return_all=True, normalized=True)\n", - "n_expecs = su2.get_state().compute_local_expectation(n_ops, return_all=True, normalized=True)\n", - "\n", - "for ix, iy in itertools.product(range(Lx), range(Ly)):\n", - " print(\"(%i, %i): SZ=%.2f, N=%.2f\"%(ix,iy,sz_expecs[(ix,iy)][0], n_expecs[(ix,iy)][0]))" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "chi=128\n", - "-5.701882215719689 -5.701882215719689 [0.02132075763649735, -0.0213207576409023, -0.02132075763619834, 0.021320757640603233] [1.1301947125656033, 1.1301947125660177, 1.1301947125660234, 1.1301947125656127]\n", - "-5.701889174439364 -5.701889174439364 [-0.00907496003109931, 0.009062174991132932, 0.009057952980593226, -0.009045148380751517] [1.079505995534109, 1.079525465973902, 1.0795266433543542, 1.079496290829968]\n", - "-5.699187240224151 -5.699187240224151 [0.061339504184178995, -0.061328171581932406, -0.06134870578994964, 0.06133737150197102] [1.0106123764105819, 1.0110268682097463, 1.0118180744912104, 1.0113771545526176]\n" - ] - } - ], - "source": [ - "psi0 = su.get_state()\n", - "psi1 = su1.get_state()\n", - "psi2 = 
su2.get_state()\n", - "\n", - "max_bond = 128\n", - "ene0 = psi0.compute_local_expectation(Ham.terms, normalized=True, max_bond=max_bond)\n", - "z0 = psi0.compute_local_expectation(sz_ops, return_all=True, normalized=True, max_bond=max_bond)\n", - "n0 = psi0.compute_local_expectation(n_ops, return_all=True, normalized=True, max_bond=max_bond)\n", - "ene1 = psi1.compute_local_expectation(Ham.terms, normalized=True, max_bond=max_bond)\n", - "z1 = psi1.compute_local_expectation(sz_ops, return_all=True, normalized=True, max_bond=max_bond)\n", - "n1 = psi1.compute_local_expectation(n_ops, return_all=True, normalized=True, max_bond=max_bond)\n", - "ene2 = psi2.compute_local_expectation(Ham.terms, normalized=True, max_bond=max_bond)\n", - "z2 = psi2.compute_local_expectation(sz_ops, return_all=True, normalized=True, max_bond=max_bond)\n", - "n2 = psi2.compute_local_expectation(n_ops, return_all=True, normalized=True, max_bond=max_bond)\n", - "print(\"chi=%i\"%max_bond)\n", - "print(ene0, su.energies[-1], [val[0] for val in z0.values()], [val[0] for val in n0.values()])\n", - "print(ene1, su1.energies[-1], [val[0] for val in z1.values()], [val[0] for val in n1.values()])\n", - "print(ene2, su2.energies[-1], [val[0] for val in z2.values()], [val[0] for val in n2.values()])" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 43, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAD4CAYAAADvsV2wAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAA3AklEQVR4nO3dd3yV5f3/8deVTQIkkDCzwwoBwp7K3ltUVFxoVbRfbe3PqtVq1bZSrbVWa1snqCiKiy3I3kNImAkJIQnZZJM9T871++OAIhAJyUnuc5LP8/HgATnn3Pf9uVHeuXPd1/25lNYaIYQQzZ+D0QUIIYRoGhL4QgjRQkjgCyFECyGBL4QQLYQEvhBCtBBORhfwS3x8fHRQUJDRZQghhF2JjIzM1Vp3uPx1mw78oKAgIiIijC5DCCHsilIq+Wqvy5COEEK0EDYZ+Eqp2Uqp9wsLC40uRQghmg2bDHyt9Tqt9SJPT0+jSxFCiGbDpsfwhRCiurqatLQ0KioqjC7F5ri5ueHn54ezs3OdPi+BL4SwaWlpabRp04agoCCUUkaXYzO01uTl5ZGWlkZwcHCdtrHJIR0hhLiooqICb29vCfvLKKXw9va+rp98bDLw5aatEOJSEvZXd71/LzYZ+A29afufb5/ktc8fsnJVQghh32wy8BsqMncP35fvx1xTY3QpQohm6L777uObb7654vWMjAxuvfXWOu/nueeew9/fn9atW//s9TfeeIOwsDDCw8OZOHEiyclXfY7qujXLwA9rO4AcJwf2Hf/O6FKEEC1I165dr/qNoDazZ8/m0KFDV7w+cOBAIiIiOHHiBLfeeitPP/20VeprloE/dfBCAHZEf2lwJUKI5mDZsmWEh4fTv39/7rnnHgB2797NqFGjCAkJ+THkk5KS6Nu3b533O2LECLp06XLF6+PHj8fd3f3Hz6SlpVnhLJrptMzwnqPw36WJNccYXYoQwor+vC6aUxlFVt1nWNe2vDi7T63vR0dH8/LLL7N//358fHzIz8/niSee4Ny5c+zdu5fY2FjmzJlzxVDO6dOnuf3226+6z507d+Ll5VWn+pYsWcL06dPrfD6/pFkGPkCo8mWnazp5BZl4e3U2uhwhhJ3avn078+fPx8fHB4D27dsDcNNNN+Hg4EBYWBhZWVlXbNerVy+OHTvWoGN/9tlnREREsGvXrgbt5yKbDHyl1Gxgdvfu3eu9jyH+k9iSuYz1+5awcOZz1itOCGGYX7oSb2qurq4//llrfcX7Db3C37p1K4sXL2bXrl0/O1ZD2OQYvjV66cy84QFczZojGTusWJkQoqWZMGECX3/9NXl5eQDk5+fXabuLV/hX+3WtsD969CgPP/wwa9eupWPHjg09hR/ZZOBbg2fr9oRWuRGrMo0uRQhhx/r06cNzzz3H2LFj6d+/P0888YTV9v3000/j5+dHWVkZfn5+vPTSSwA89dRTlJSUMH/+fAYMGMCcOXOscjx1tR9FbMWQIUN0QxZAWfzZfayoiWTpkDcZ2meiFSsTQjSVmJgYevfubXQZNutqfz9KqUit9ZDLP9tsr/ABJva/E4Btx5YbXIkQQhivWQf+sLCJdKnWnCo9aXQpQghhuGYd+A6OjoTqjsQ6l1FcWmB0OUIIYahmHfgAAzqPo9zBge/2fWR0KUIIYahmH/izb3wQZ62JSNlsdClCCGGoZh/4Hdp1pVeVM7HaOr0ohBDCXjX7wAcIdQsl2QWiE+o/xVMIIS5q7PbIAF999RVhYWH06dOHO++8s0H1XmSTgW/tFa/Ght0GwKaIpVbZnxBCXI212iOfOXOGV155hX379hEdHc2bb75plfpsMvCt0VrhUmMGzsHHZCaq4IhV9ieEaFmauj3yBx98wKOPPkq7du0ArNZewSabp1mbg6MjYWYfIlxyOV+YQzvPDkaXJISoj43PQKaVn6vp3A+mv1rr20a0R46LiwPghhtuoKamhpdeeolp06Zd/7ldpkUEPsDYoFvYnfEBy7f+ncdued3oc
[... base64-encoded PNG output (relative-energy convergence plot for chi=12, 16, 24) omitted ...]\n",
-      "text/plain": [
-       "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "from matplotlib import pyplot as plt\n", - "tlst = np.arange(len(su.energies)) * tau * 100\n", - "plt.plot(tlst, (np.asarray(su.energies)-efci)/abs(efci), label=\"chi=12\")\n", - "plt.plot(tlst, (np.asarray(su1.energies)-efci)/abs(efci), label=\"chi=16\")\n", - "plt.plot(tlst, (np.asarray(su2.energies)-efci)/abs(efci), label=\"chi=24\")\n", - "plt.yscale(\"log\")\n", - "plt.legend()" - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "n=6000, tau=0.001, energy~-5.702278: 100%|██████████| 6000/6000 [16:12<00:00, 6.17it/s] \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(0, 0): SZ=0.0131, N=0.9095\n", - "(0, 1): SZ=-0.0134, N=0.9091\n", - "(1, 0): SZ=-0.0135, N=0.9087\n", - "(1, 1): SZ=0.0138, N=0.9098\n" - ] - } - ], - "source": [ - "su3 = SimpleUpdate(\n", - " psi,\n", - " Ham,\n", - " chi=128, # boundary contraction bond dim for computing energy\n", - " D = 36,\n", - " compute_energy_every=100,\n", - " compute_energy_per_site=False,\n", - " keep_best=True,\n", - " ordering = 'random',\n", - " gauge_smudge = 1e-6,\n", - " callback=None\n", - ")\n", - "tau = 0.001\n", - "su3.evolve(6000, tau=tau)\n", - "\n", - "sz_expecs = su3.get_state().compute_local_expectation(sz_ops, return_all=True, normalized=True)\n", - "n_expecs = su3.get_state().compute_local_expectation(n_ops, return_all=True, normalized=True)\n", - "\n", - "for ix, iy in itertools.product(range(Lx), range(Ly)):\n", - " print(\"(%i, %i): SZ=%.4f, N=%.4f\"%(ix,iy,sz_expecs[(ix,iy)][0], n_expecs[(ix,iy)][0]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} From 90add79fbcec11a1e21a855374e6d44dafe88f97 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 21 May 2021 15:28:24 -0700 Subject: [PATCH 53/61] add func to obtain contraction path for block tensors --- quimb/tensor/tensor_block.py | 36 +++++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/quimb/tensor/tensor_block.py b/quimb/tensor/tensor_block.py index 194020d9..58bd3daf 100644 --- a/quimb/tensor/tensor_block.py +++ b/quimb/tensor/tensor_block.py @@ -5,12 +5,12 @@ import functools import numpy as np +import opt_einsum as oe from ..utils import (check_opt, oset) from .drawing import draw_tn -from .tensor_core import Tensor, TensorNetwork, _parse_split_opts, oset_union, tags_to_oset, rand_uuid, _parse_split_opts -from .tensor_core import tensor_contract as _tensor_contract +from .tensor_core import Tensor, TensorNetwork, _parse_split_opts, oset_union, tags_to_oset, rand_uuid, _parse_split_opts, concat, unique, _gen_output_inds, _inds_to_eq, get_contraction from .block_tools import apply, get_smudge_balance from .block_interface import dispatch_settings @@ -30,10 +30,40 @@ def _core_contract(T1, T2): else: return T1.__class__(data=o_array, inds=o_ix, tags=o_tags) +def 
get_block_contraction_path_info(*tensors, **contract_opts): + i_ix = tuple(t.inds for t in tensors) # input indices per tensor + total_ix = tuple(concat(i_ix)) # list of all input indices + all_ix = tuple(unique(total_ix)) + + o_ix = tuple(_gen_output_inds(total_ix)) + + # possibly map indices into the range needed by opt-einsum + eq = _inds_to_eq(all_ix, i_ix, o_ix) + + size_dict = dict() + for T in tensors: + i_shape = T.shape + for ax, ix in enumerate(T.inds): + if ix not in size_dict: + size_dict[ix] = i_shape[ax] + else: + size_dict[ix] = max(i_shape[ax], size_dict[ix]) + + ops = [] + for T in tensors: + i_shape = [size_dict[ix] for ix in T.inds] + ops.append(tuple(i_shape)) + + path_info = get_contraction(eq, *ops, get='info', **contract_opts) + path_info.quimb_symbol_map = { + oe.get_symbol(i): ix for i, ix in enumerate(all_ix) + } + return path_info + def tensor_contract(*tensors, output_inds=None, **contract_opts): if len(tensors) == 1: return tensors[0] - path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) + path_info = get_block_contraction_path_info(*tensors, **contract_opts) tensors = list(tensors) for conc in path_info.contraction_list: pos1, pos2 = sorted(conc[0]) From 1d83286575908e911efd7db84534e11d6ee7c167 Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 21 May 2021 15:35:20 -0700 Subject: [PATCH 54/61] bugfix --- quimb/tensor/fermion.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index 1ca25c6d..aabb191a 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -10,9 +10,8 @@ from .drawing import draw_tn from .tensor_core import Tensor, TensorNetwork, _parse_split_opts, oset_union, tags_to_oset, rand_uuid, _parse_split_opts -from .tensor_core import tensor_contract as _tensor_contract from .tensor_block import tensor_split as _tensor_split -from .tensor_block import _core_contract, tensor_canonize_bond, tensor_compress_bond, BlockTensor, BlockTensorNetwork +from .tensor_block import _core_contract, tensor_canonize_bond, tensor_compress_bond, BlockTensor, BlockTensorNetwork, get_block_contraction_path_info from .block_tools import apply, get_smudge_balance from .block_interface import dispatch_settings from functools import wraps @@ -358,7 +357,7 @@ def tensor_contract(*tensors, output_inds=None, inplace=False, **contract_opts): return tensors[0] else: return tensors[0].copy() - path_info = _tensor_contract(*tensors, get='path-info', **contract_opts) + path_info = get_block_contraction_path_info(*tensors, **contract_opts) fs, tid_lst = _fetch_fermion_space(*tensors, inplace=inplace) if inplace: tensors = list(tensors) From e53892cdafb4cb1e433f138a42f0183f42630c1f Mon Sep 17 00:00:00 2001 From: yangcal Date: Fri, 28 May 2021 16:30:37 -0700 Subject: [PATCH 55/61] add random methods for block tensors and fermion PEPS --- quimb/tensor/block_gen.py | 403 +++++++++++++++++++++ quimb/tensor/block_interface.py | 5 +- quimb/tensor/fermion_2d.py | 107 +++++- quimb/tensor/fermion_gen.py | 58 --- quimb/tensor/test/test_block_numerics.py | 92 ++--- quimb/tensor/test/test_fermion_2d.py | 80 ++-- quimb/tensor/test/test_fermion_numerics.py | 93 ++--- 7 files changed, 658 insertions(+), 180 deletions(-) create mode 100644 quimb/tensor/block_gen.py delete mode 100644 quimb/tensor/fermion_gen.py diff --git a/quimb/tensor/block_gen.py b/quimb/tensor/block_gen.py new file mode 100644 index 00000000..15e126f2 --- /dev/null +++ b/quimb/tensor/block_gen.py @@ -0,0 +1,403 @@ +import 
numpy as np +from itertools import product + +from ..gen.rand import randn, seed_rand +from .block_interface import dispatch_settings, get_symmetry + +from pyblock3.algebra.core import SubTensor +from pyblock3.algebra.fermion import SparseFermionTensor +from pyblock3.algebra.symmetry import BondInfo + +def backend_wrapper(func): + def new_func(*args, **kwargs): + T = func(*args, **kwargs) + use_cpp = dispatch_settings("use_cpp") + if use_cpp: + T = T.to_flat() + return T + return new_func + +def _dispatch_dq(dq, symmetry): + '''Construct pyblock3 fermion symmetry object + + Parameters + ---------- + dq : int or tuple of integers + Quantum particle number(s) + symmetry : fermion symmetry class + + Returns + ------- + Fermion symmetry object + ''' + if dq is None: + dq = (0, ) + elif isinstance(dq, (int, np.integer, np.float)): + dq = (int(dq), ) + dq = symmetry(*dq) + return dq + +@backend_wrapper +def rand_single_block(shape, dtype=float, seed=None, + pattern=None, dq=None, ind=None): + '''Construct random block tensor with one block + + Parameters + ---------- + shape : tuple or list of integers + shape for the single block + dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional + The underlying data type. + seed : int, optional + A random seed. + pattern : string consisting of ("+", "-"), optional + The symmetry pattern for each dimension + dq : int or tuple of integers, optional + The net particle number(s) in this tensor, default is 0 + ind: int, optional + The axis to dispatch the dq symmetry + + Returns + ------- + Block tensor + ''' + if seed is not None: + np.random.seed(seed) + symmetry = get_symmetry() + dq = _dispatch_dq(dq, symmetry) + if pattern is None: + pattern = "-" * (len(shape)-1) + "+" + if ind is None: + try: + ind = pattern.index("+") + except: + ind = 0 + if pattern[ind] == "-": + dq = - dq + q_labels = [dq if ix==ind else symmetry(0) for ix in range(len(shape))] + array = randn(shape, dtype=dtype) + blk = SubTensor(reduced=array, q_labels=q_labels) + T = SparseFermionTensor(blocks=[blk, ], pattern=pattern) + return T + +@backend_wrapper +def ones_single_block(shape, pattern=None, dq=None, ind=None): + '''Construct block tensor filled with ones with a single block + + Parameters + ---------- + shape : tuple or list of integers + shape for the single block + pattern : string consisting of ("+", "-"), optional + The symmetry pattern for each dimension + dq : int or tuple of integers, optional + The net particle number(s) in this tensor, default is 0 + ind: int, optional + The axis to dispatch the dq symmetry + + Returns + ------- + Block tensor + ''' + symmetry = get_symmetry() + dq = _dispatch_dq(dq, symmetry) + if pattern is None: + pattern = "-" * (len(shape)-1) + "+" + if ind is None: + try: + ind = pattern.index("+") + except: + ind = 0 + if pattern[ind] == "-": + dq = - dq + q_labels = [dq if ix==ind else symmetry(0) for ix in range(len(shape))] + array = np.ones(shape) + blk = SubTensor(reduced=array, q_labels=q_labels) + T = SparseFermionTensor(blocks=[blk, ], pattern=pattern) + return T + +@backend_wrapper +def rand_all_blocks(shape, symmetry_info, dtype=float, + seed=None, pattern=None, dq=None): + '''Construct block tensor with specified blocks + + Parameters + ---------- + shape : tuple or list of integers + shape for all blocks + symmetry_info: tuple / list of tuple/list of integers + allowed quantum numbers for each dimension, eg, [(0,1),(0,1),(0,1,2)] + means allowed quantum numbers for the three dimensions are + (0,1), (0,1) and (0,1,2) 
respectively. For U1 \otimes U1 symmetry, + this could be [((0,0), (1,1), (1,-1)), ((0,0), (1,1), (1,-1)), + (0,0),(1,1),(1,-1),(2,0)] where each tuple denotes a particle + number and SZ number pair + dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional + The underlying data type. + seed : int, optional + A random seed. + pattern : string consisting of ("+", "-"), optional + The symmetry pattern for each dimension + dq : int or tuple of integers, optional + The net particle number(s) in this tensor, default is 0 + + Returns + ------- + Block tensor + ''' + if seed is not None: + np.random.seed(seed) + symmetry = get_symmetry() + dq = _dispatch_dq(dq, symmetry) + bond_infos = [] + for sh, ibonds in zip(shape, symmetry_info): + bonds = [] + for ibond in ibonds: + if isinstance(ibond, (int, np.integer)): + bonds.append(symmetry(ibond)) + else: + bonds.append(symmetry(*ibond)) + bonds = dict(zip(bonds, [sh,]*len(bonds))) + bond_infos.append(BondInfo(bonds)) + T = SparseFermionTensor.random(bond_infos, pattern=pattern, dq=dq, dtype=dtype) + return T + +def gen_2d_bonds(*args): + symmetry = dispatch_settings("symmetry") + func = {"U1": gen_2d_bonds_u1, + "Z2": gen_2d_bonds_z2, + "Z22": gen_2d_bonds_z22, + "U11": gen_2d_bonds_u11}[symmetry] + return func(*args) + +def gen_2d_bonds_z2(pnarray, physical_infos): + r'''Construct Z2 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + pnarray : array_like + Net Z2 symmetry for each site + physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + number of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers in each dimension ordered by up, right, down, left and physical. + dq_infos: dict[tuple[int], int] + A dictionary mapping the site coordinates to the net Z2 symmetry + on that site + ''' + Lx, Ly = pnarray.shape + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + nvir = (ix != Lx - 1) + (ix != 0) +\ + (iy != Ly - 1) + (iy != 0) + symmetry_infos[ix,iy] = [(0,1)] * nvir + [tuple(physical_infos[ix][iy])] + dq_infos[ix,iy]= pnarray[ix,iy] + return symmetry_infos, dq_infos + +def gen_2d_bonds_z22(n1array, n2array, physical_infos): + r'''Construct Z2 \otimes Z2 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + n1array : array_like + First entry of the net Z2 symmetry pairs for each site + n2array : array_like + Second entry of the net Z2 symmetry pairs for each site + physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + number pairs of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple] + A dictionary mapping the site coordinates to the allowed quantum particle + number pairs in each dimension ordered by up, right, down, left and physical. 
+ dq_infos: dict[tuple[int], tuple of integers] + A dictionary mapping the site coordinates to the net quantum particle number + pair on that site + ''' + Lx, Ly = n1array.shape + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + nvir = (ix != Lx - 1) + (ix != 0) +\ + (iy != Ly - 1) + (iy != 0) + symmetry_infos[ix,iy] = [((0,0),(0,1),(1,0),(1,1))] * nvir + [tuple(physical_infos[ix][iy])] + dq_infos[ix,iy]= (n1array[ix,iy], n2array[ix,iy]) + return symmetry_infos, dq_infos + +def gen_2d_bonds_u1(pnarray, physical_infos): + r'''Construct U1 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + pnarray : array_like + The net particle number inflow for each site + physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + number of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers in each dimension ordered by up, right, down, left and physical. + dq_infos: dict[tuple[int], int] + A dictionary mapping the site coordinates to the net quantum particle number + on that site + ''' + Lx, Ly = pnarray.shape + s_type = (Lx % 2==0) + vbonds = [[0 for _ in range(Ly)] for _ in range(Lx+1)] + hbonds = [[0 for _ in range(Ly+1)] for _ in range(Lx)] + def _get_bond(ix, iy, *directions): + bond_dict = {"r": hbonds[ix][iy+1], + "l": hbonds[ix][iy], + "u": vbonds[ix+1][iy], + "d": vbonds[ix][iy]} + return [bond_dict[ix] for ix in directions] + + ave = np.sum(pnarray)/pnarray.size + for ix in range(Lx): + sweep_left = (s_type and ix%2==0) or (not s_type and ix%2==1) + if sweep_left: + for iy in range(Ly-1,-1,-1): + if iy ==0: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + right, down, up = _get_bond(ix, iy, "r", "d", "u") + hbonds[ix][iy] = pnarray[ix][iy] + up + right - down - ave + else: + for iy in range(Ly): + if iy ==Ly-1: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + left, up, down = _get_bond(ix, iy, "l", "u", "d") + hbonds[ix][iy+1] = down + left + ave - up - pnarray[ix][iy] + + hbonds = np.asarray(hbonds)[:,1:-1] + vbonds = np.asarray(vbonds)[1:-1] + + def _round_to_bond(bd): + if bd.is_integer(): + ibond = np.rint(bd).astype(int) + return [ibond-1, ibond, ibond+1] + else: + ibond = np.floor(bd).astype(int) + return [ibond, ibond+1] + + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + block = [] + if ix != Lx - 1: # bond up + block.append(_round_to_bond(vbonds[ix,iy])) + if iy != Ly - 1: # bond right + block.append(_round_to_bond(hbonds[ix,iy])) + if ix != 0: # bond down + block.append(_round_to_bond(vbonds[ix-1,iy])) + if iy != 0: # bond left + block.append(_round_to_bond(hbonds[ix,iy-1])) + block.append(physical_infos[ix][iy]) + symmetry_infos[ix,iy] = block + dq_infos[ix,iy]=pnarray[ix,iy] + return symmetry_infos, dq_infos + +def gen_2d_bonds_u11(pnarray, szarray, physical_infos): + r'''Construct U1 \otime U1 symmetry informations for each leg for 2d Fermionic TensorNetwork + + Parameters + ---------- + pnarray : array_like + The net particle number inflow for each site + szarray : array_like + The net SZ number inflow for each site, the parity for each site must be + consistent with pnarray + 
physical_infos : dict[tuple[int], tuple/list of integers] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers (particle number and SZ number pair) of the physical dimension + + Returns + ------- + symmetry_infos : dict[tuple[int], list/tuple] + A dictionary mapping the site coordinates to the allowed particle number + and SZ number pairs in each dimension ordered by up, right, down, left and physical. + dq_infos: dict[tuple[int], tuple of integers] + A dictionary mapping the site coordinates to the net quantum particle number + and SZ number pair on that site + ''' + + Lx, Ly = pnarray.shape + if not np.allclose(pnarray % 2, szarray % 2): + raise ValueError("parity inconsistent") + if abs(szarray).max()>1: + raise ValueError("net |SZ| >1 not supported yet") + s_type = (Lx % 2==0) + vbonds = [[0 for _ in range(Ly)] for _ in range(Lx+1)] + hbonds = [[0 for _ in range(Ly+1)] for _ in range(Lx)] + def _get_bond(ix, iy, *directions): + bond_dict = {"r": hbonds[ix][iy+1], + "l": hbonds[ix][iy], + "u": vbonds[ix+1][iy], + "d": vbonds[ix][iy]} + return [bond_dict[ix] for ix in directions] + + ave = np.sum(pnarray)/pnarray.size + for ix in range(Lx): + sweep_left = (s_type and ix%2==0) or (not s_type and ix%2==1) + if sweep_left: + for iy in range(Ly-1,-1,-1): + if iy ==0: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + right, down, up = _get_bond(ix, iy, "r", "d", "u") + hbonds[ix][iy] = pnarray[ix][iy] + up + right - down - ave + else: + for iy in range(Ly): + if iy ==Ly-1: + right, left, down = _get_bond(ix, iy, "r", "l", "d") + vbonds[ix+1][iy] = down + left + ave - right - pnarray[ix][iy] + else: + left, up, down = _get_bond(ix, iy, "l", "u", "d") + hbonds[ix][iy+1] = down + left + ave - up - pnarray[ix][iy] + hbonds = np.asarray(hbonds)[:,1:-1] + vbonds = np.asarray(vbonds)[1:-1] + + def _round_to_bond(bd): + if bd.is_integer(): + ibond = np.rint(bd).astype(int) + if ibond % 2==0: + return [(ibond-1,1),(ibond-1,-1), (ibond,0), (ibond+1,-1), (ibond+1,1)] + else: + return [(ibond-1, 0), (ibond, 1), (ibond, -1), (ibond+1, 0)] + else: + ibond = np.floor(bd).astype(int) + if ibond % 2==0: + return [(ibond,0), (ibond+1,-1), (ibond+1,1)] + else: + return [(ibond, 1), (ibond, -1), (ibond+1, 0)] + symmetry_infos = dict() + dq_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + block = [] + if ix != Lx - 1: # bond up + block.append(_round_to_bond(vbonds[ix,iy])) + if iy != Ly - 1: # bond right + block.append(_round_to_bond(hbonds[ix,iy])) + if ix != 0: # bond down + block.append(_round_to_bond(vbonds[ix-1,iy])) + if iy != 0: # bond left + block.append(_round_to_bond(hbonds[ix,iy-1])) + block.append(physical_infos[ix][iy]) + symmetry_infos[ix,iy] = block + dq_infos[ix,iy]= (pnarray[ix,iy],szarray[ix,iy]) + return symmetry_infos, dq_infos diff --git a/quimb/tensor/block_interface.py b/quimb/tensor/block_interface.py index 8be9ee74..119accf9 100644 --- a/quimb/tensor/block_interface.py +++ b/quimb/tensor/block_interface.py @@ -26,7 +26,7 @@ def set_fermion(use_fermion): this.USE_FERMION = use_fermion setting.set_fermion(use_fermion) -def set(**kwargs): +def set_options(**kwargs): symmetry = kwargs.pop("symmetry", this.DEFAULT_SYMMETRY) use_fermion = kwargs.pop("fermion", this.USE_FERMION) use_cpp = kwargs.pop("use_cpp", this.USE_CPP) @@ -47,6 +47,9 @@ def dispatch_settings(*keys): _settings = _settings[0] return _settings +def get_symmetry(): + return 
setting.symmetry_map[this.DEFAULT_SYMMETRY] + to_exponential = fermion_ops.get_exponential H1 = fermion_ops.H1 Hubbard = fermion_ops.Hubbard diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index c4daac12..89322252 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -31,6 +31,7 @@ FermionTensorNetwork, tensor_contract ) +from .block_gen import rand_all_blocks, ones_single_block INVERSE_CUTOFF = 1e-10 @@ -1044,9 +1045,109 @@ def __init__(self, arrays, *, shape='urdlp', tags=None, super().__init__(tensors, virtual=True, **tn_opts) @classmethod - def rand(cls, Lx, Ly, bond_dim, phys_dim=2, - dtype=float, seed=None, **peps_opts): - raise NotImplementedError + def rand(cls, Lx, Ly, bond_dim, symmetry_infos, dq_infos, + phys_dim=2, seed=None, dtype=float, **peps_opts): + r'''Construct a random 2d FPEPS with given quantum particle number distribution + + Parameters + ---------- + Lx : int + The number of rows. + Ly : int + The number of columns. + bond_dim: int + Virtual bond dimension for each virtual block + symmetry_infos : dict[tuple[int], list/tuple] + A dictionary mapping the site coordinates to the allowed quantum particle + numbers in each dimension ordered by up, right, down, left and physical, + which will be supplied to ``rand_all_blocks`` + dq_infos: dict[tuple[ix], int or tuple/list of integers] + A dictionary mapping the site coordinates to the net quantum particle numbers + on that site, which will be supplied to ``rand_all_blocks`` + phys_dim: int + Physical bond dimension for each physical block + seed : int, optional + A random seed. + dtype : {'float64', 'complex128', 'float32', 'complex64'}, optional + The underlying data type. + pepes_opts + Supplied to :class:`~quimb.tensor.fermion_2d.FPEPS`. + + Returns + ------- + FPEPS + ''' + if seed is not None: + np.random.seed(seed) + pattern_map = {"d": "+", "l":"+", "p":"+", + "u": "-", "r":"-"} + + arrays = [[None for _ in range(Ly)] for _ in range(Lx)] + for i, j in product(range(Lx), range(Ly)): + shape = [] + pattern = "" + if i != Lx - 1: # bond up + shape.append(bond_dim) + pattern += pattern_map['u'] + if j != Ly - 1: # bond right + shape.append(bond_dim) + pattern += pattern_map['r'] + if i != 0: # bond down + shape.append(bond_dim) + pattern += pattern_map['d'] + if j != 0: # bond left + shape.append(bond_dim) + pattern += pattern_map['l'] + shape.append(phys_dim) + pattern += pattern_map['p'] + symmetry_info = symmetry_infos[i, j] + arrays[i][j] = rand_all_blocks(shape, symmetry_info, dtype=dtype, pattern=pattern, dq=dq_infos[i, j]) + return FPEPS(arrays, **peps_opts) + + @classmethod + def gen_site_prod_state(cls, Lx, Ly, phys_infos, phys_dim=1, **peps_opts): + r'''Construct a 2d FPEPS as site product state + + Parameters + ---------- + Lx : int + The number of rows. + Ly : int + The number of columns. + phys_infos: dict[tuple[int], int or tuple/list] + A dictionary mapping the site coordinates to the specified single quantum + particle state + phys_dim: int + Physical bond dimension for the physical block + pepes_opts + Supplied to :class:`~quimb.tensor.fermion_2d.FPEPS`. 
+ + Returns + ------- + FPEPS + ''' + pattern_map = {"d": "+", "l":"+", "p":"+", + "u": "-", "r":"-"} + arrays = [[None for _ in range(Ly)] for _ in range(Lx)] + for i, j in product(range(Lx), range(Ly)): + shape = [] + pattern = "" + if i != Lx - 1: # bond up + shape.append(1) + pattern += pattern_map['u'] + if j != Ly - 1: # bond right + shape.append(1) + pattern += pattern_map['r'] + if i != 0: # bond down + shape.append(1) + pattern += pattern_map['d'] + if j != 0: # bond left + shape.append(1) + pattern += pattern_map['l'] + shape.append(phys_dim) + pattern += pattern_map['p'] + arrays[i][j] = ones_single_block(shape, pattern, phys_infos[i, j], ind=len(shape)-1) + return FPEPS(arrays, **peps_opts) def add_PEPS(self, other, inplace=False): raise NotImplementedError diff --git a/quimb/tensor/fermion_gen.py b/quimb/tensor/fermion_gen.py deleted file mode 100644 index 2dbf0b60..00000000 --- a/quimb/tensor/fermion_gen.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np -from pyblock3.algebra.core import SubTensor -from pyblock3.algebra.fermion import SparseFermionTensor -from pyblock3.algebra import fermion_encoding -import quimb.tensor.block_interface as bitf -from quimb.tensor.fermion_2d import FPEPS - -pattern_map = {"d": "+", "l":"+", "p":"+", - "u": "-", "r":"-"} - -def _gen_site_tsr(state, pattern=None, ndim=2, ax=0, symmetry=None, use_cpp=None): - if symmetry is None: - symmetry = bitf.dispatch_settings("symmetry") - if use_cpp is None: - use_cpp = bitf.dispatch_settings("use_cpp") - state_map = fermion_encoding.get_state_map(symmetry) - if state not in state_map: - raise KeyError("requested state not recoginized") - qlab, ind, dim = state_map[state] - symmetry = qlab.__class__ - q_label = [symmetry(0),]*ax + [qlab] + [symmetry(0),] * (ndim-ax-1) - shape = [1,] * ax + [dim,] +[1,] *(ndim-ax-1) - dat = np.zeros(shape) - ind = (0,)* ax + (ind,) + (0,) * (ndim-ax-1) - dat[ind] = 1 - blocks = [SubTensor(reduced=dat, q_labels=q_label)] - T = SparseFermionTensor(blocks=blocks, pattern=pattern) - if use_cpp: - T = T.to_flat() - return T - -def gen_mf_peps(state_array, shape='urdlp', symmetry=None, use_cpp=None, **kwargs): - if symmetry is None: - symmetry = bitf.dispatch_settings("symmetry") - Lx, Ly = state_array.shape - arr = state_array.astype("int") - cache = dict() - def _gen_ij(i, j): - state = arr[i, j] - array_order = shape - if i == Lx - 1: - array_order = array_order.replace('u', '') - if j == Ly - 1: - array_order = array_order.replace('r', '') - if i == 0: - array_order = array_order.replace('d', '') - if j == 0: - array_order = array_order.replace('l', '') - pattern = "".join([pattern_map[i] for i in array_order]) - ndim = len(array_order) - ax = array_order.index('p') - key = (state, ndim, ax, pattern) - if key not in cache: - cache[key] = _gen_site_tsr(state, pattern, ndim, ax, symmetry, use_cpp).copy() - return cache[key] - - tsr_array = [[_gen_ij(i,j) for j in range(Ly)] for i in range(Lx)] - return FPEPS(tsr_array, shape=shape, **kwargs) diff --git a/quimb/tensor/test/test_block_numerics.py b/quimb/tensor/test/test_block_numerics.py index b4e75aad..fc744301 100644 --- a/quimb/tensor/test/test_block_numerics.py +++ b/quimb/tensor/test/test_block_numerics.py @@ -2,29 +2,26 @@ import numpy as np from quimb.tensor.tensor_block import ( BlockTensor, BlockTensorNetwork, tensor_contract) -from quimb.tensor.block_interface import BondInfo, U11, U1, Z2, Z4, Z22, set -from pyblock3.algebra.fermion import SparseFermionTensor - -set(fermion=False) -rand = SparseFermionTensor.random 
+from quimb.tensor.block_gen import rand_all_blocks as rand +from quimb.tensor.block_interface import set_options @pytest.fixture(scope='class') def u11setup(request): - bond1 = BondInfo({U11(0):3, U11(1,1): 3, U11(1,-1):3, U11(2):3}) - bond2 = BondInfo({U11(0):5, U11(1,1): 5, U11(1,-1):5, U11(2):5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U11(1,1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U11(-1,-1), pattern="++-").to_flat() + bond = [(0,0), (1,1), (1,-1), (2,0)] + set_options(symmetry="u11", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(1,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(-1,-1)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,-1)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(-1,1)) - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U11(1,-1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U11(-1,1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=U11(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=U11(1,-1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,-1)) Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -34,21 +31,22 @@ def u11setup(request): @pytest.fixture(scope='class') def z22setup(request): - bond1 = BondInfo({Z22(0):3, Z22(0,1): 3, Z22(1,0):3, Z22(1,1):3}) - bond2 = BondInfo({Z22(0):5, Z22(0,1): 5, Z22(1,0):5, Z22(1,1):5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z22(0,1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z22(1,0), pattern="++-").to_flat() + bond = [(0,0), (0,1), (1,0), (1,1)] + set_options(symmetry="z22", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(0,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(1,0)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,0)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(0,1)) - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z22(1,0), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z22(0,1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=Z22(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z22(1,0), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,0)) + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = BlockTensor(ab.dagger, 
inds=['b1','a'], tags=["ab1"]) @@ -58,13 +56,13 @@ def z22setup(request): @pytest.fixture(scope='class') def u1setup(request): - bond1 = BondInfo({U1(0):3, U1(1): 3, U1(3):3, U1(2):3}) - bond2 = BondInfo({U1(0):5, U1(1): 5, U1(3):5, U1(2):5}) + bond = (0,1,2,3) + set_options(symmetry="u1", fermion=False) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U1(1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U1(2), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U1(-1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U1(-2), pattern="+-+").to_flat() + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=-1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=-2) request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) @@ -72,8 +70,9 @@ def u1setup(request): request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=U1(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=U1(1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -83,21 +82,22 @@ def u1setup(request): @pytest.fixture(scope='class') def z4setup(request): - bond1 = BondInfo({Z4(0):3, Z4(1): 3, Z4(3):3, Z4(2):3}) - bond2 = BondInfo({Z4(0):5, Z4(1): 5, Z4(3):5, Z4(2):5}) + bond = (0,1,2,3) + set_options(symmetry="z4", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=0) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=1) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z4(1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z4(2), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z4(0), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z4(1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=Z4(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z4(1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -107,20 +107,22 @@ def z4setup(request): @pytest.fixture(scope='class') def z2setup(request): - bond1 = BondInfo({Z2(0):3, Z2(1): 3}) - bond2 = BondInfo({Z2(0):5, Z2(1): 5}) - request.cls.abc = abc = 
rand((bond2, bond1, bond1), dq=Z2(0), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z2(1), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z2(1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z2(0), pattern="+-+").to_flat() + bond = (0,1) + set_options(symmetry="z2", fermion=False) + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=0) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=1) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=0) + request.cls.Tabc = Tabc = BlockTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = BlockTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = BlockTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = BlockTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = BlockTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=Z2(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z2(1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + Tab = BlockTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = BlockTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = BlockTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) diff --git a/quimb/tensor/test/test_fermion_2d.py b/quimb/tensor/test/test_fermion_2d.py index 5a08a450..196af9aa 100644 --- a/quimb/tensor/test/test_fermion_2d.py +++ b/quimb/tensor/test/test_fermion_2d.py @@ -1,76 +1,98 @@ import pytest import numpy as np -import itertools -from quimb.tensor.block_interface import BondInfo, SparseFermionTensor, U11, U1, Z4, Z2, Z22 -from quimb.tensor.fermion_gen import gen_mf_peps +from itertools import product +from quimb.tensor.block_interface import set_options +from quimb.tensor.fermion_2d import FPEPS +from quimb.tensor.block_gen import rand_all_blocks as rand +set_options(fermion=True) @pytest.fixture(scope='class') def u11setup(request): - bond = BondInfo({U11(0):1, U11(2): 1, U11(1,-1):1, U11(1,1):1}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + bond = ((0,0),(1,1),(1,-1),(2,0)) + set_options(symmetry="u11") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") request.cls.G = G request.cls.Hij = Hij request.cls.Lx = Lx = 3 request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="U11") + state_map = {0:(0,0), 1:(1,1), 2:(1,-1), 3:(2,0)} + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = state_map[np.random.randint(0,4)] + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) for itsr in request.cls.peps.tensor_map.values(): itsr.data.data *= np.random.random(itsr.data.data.size) * 5 @pytest.fixture(scope='class') def z22setup(request): - bond = BondInfo({Z22(0):1, Z22(0,1): 1, Z22(1,0):1, Z22(1,1):1}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + bond = ((0,0),(0,1),(1,0),(1,1)) + set_options(symmetry="z22") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") request.cls.G = G request.cls.Hij = Hij 
request.cls.Lx = Lx = 3 request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="Z22") + state_map = {0:(0,0), 1:(0,1), 2:(1,1), 3:(2,0)} + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = state_map[np.random.randint(0,4)] + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) for itsr in request.cls.peps.tensor_map.values(): itsr.data.data *= np.random.random(itsr.data.data.size) * 5 @pytest.fixture(scope='class') def u1setup(request): - bond = BondInfo({U1(0):1, U1(1): 2, U1(2):1}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + bond = (0,1,2) + set_options(symmetry="u1") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + request.cls.G = G request.cls.Hij = Hij request.cls.Lx = Lx = 3 request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="U1") + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = np.random.randint(0,3) + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) for itsr in request.cls.peps.tensor_map.values(): itsr.data.data *= np.random.random(itsr.data.data.size) * 5 @pytest.fixture(scope='class') def z4setup(request): - bond = BondInfo({Z4(0):2, Z4(1): 2}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + bond = (0,1,2,3) + set_options(symmetry="z4") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + request.cls.G = G request.cls.Hij = Hij request.cls.Lx = Lx = 3 request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="z4") + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = np.random.randint(0,4) + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) for itsr in request.cls.peps.tensor_map.values(): itsr.data.data *= np.random.random(itsr.data.data.size) * 5 @pytest.fixture(scope='class') def z2setup(request): - bond = BondInfo({Z2(0):2, Z2(1): 2}) - G = SparseFermionTensor.random((bond, bond), pattern="+-").to_flat() - Hij = SparseFermionTensor.random((bond,)*4, pattern="++--").to_flat() + bond = (0,1) + set_options(symmetry="z2") + G = rand((1,1), [bond]*2, pattern="+-") + Hij = rand((1,1,1,1), [bond]*4, pattern="++--") + request.cls.G = G request.cls.Hij = Hij request.cls.Lx = Lx = 3 request.cls.Ly = Ly = 3 - request.cls.state_array = state_array = np.random.randint(0, 4, Lx*Ly).reshape(Lx, Ly) - request.cls.peps = gen_mf_peps(state_array, symmetry="z2") + phys_infos = dict() + for ix, iy in product(range(Lx), range(Ly)): + phys_infos[ix,iy] = np.random.randint(0,2) + request.cls.peps = FPEPS.gen_site_prod_state(Lx, Ly, phys_infos, phys_dim=1) for itsr in request.cls.peps.tensor_map.values(): itsr.data.data *= np.random.random(itsr.data.data.size) * 5 @@ -171,7 +193,7 @@ def test_normalize(self): def test_compute_local_expectation_one_sites(self): peps = self.peps - coos = list(itertools.product(range(self.Lx), range(self.Ly))) + coos = list(product(range(self.Lx), 
range(self.Ly))) terms = {coo: self.G for coo in coos} expecs = peps.compute_local_expectation( diff --git a/quimb/tensor/test/test_fermion_numerics.py b/quimb/tensor/test/test_fermion_numerics.py index 9d9e5b78..77d94522 100644 --- a/quimb/tensor/test/test_fermion_numerics.py +++ b/quimb/tensor/test/test_fermion_numerics.py @@ -2,29 +2,28 @@ import numpy as np from quimb.tensor.fermion import ( FermionTensor, FermionTensorNetwork, tensor_contract) -from quimb.tensor.block_interface import BondInfo, U11, U1, Z2, Z4, Z22, set -from pyblock3.algebra.fermion import SparseFermionTensor +from quimb.tensor.block_gen import rand_all_blocks as rand +from quimb.tensor.block_interface import set_options, set_symmetry -set(fermion=True) -rand = SparseFermionTensor.random +set_options(fermion=True) @pytest.fixture(scope='class') def u11setup(request): - bond1 = BondInfo({U11(0):3, U11(1,1): 3, U11(1,-1):3, U11(2):3}) - bond2 = BondInfo({U11(0):5, U11(1,1): 5, U11(1,-1):5, U11(2):5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U11(1,1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U11(-1,-1), pattern="++-").to_flat() + bond = [(0,0), (1,1), (1,-1), (2,0)] + set_options(symmetry="u11") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(1,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(-1,-1)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,-1)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(-1,1)) - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U11(1,-1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U11(-1,1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=U11(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=U11(1,-1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,-1)) Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -34,21 +33,22 @@ def u11setup(request): @pytest.fixture(scope='class') def z22setup(request): - bond1 = BondInfo({Z22(0):3, Z22(0,1): 3, Z22(1,0):3, Z22(1,1):3}) - bond2 = BondInfo({Z22(0):5, Z22(0,1): 5, Z22(1,0):5, Z22(1,1):5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z22(0,1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z22(1,0), pattern="++-").to_flat() + bond = [(0,0), (0,1), (1,0), (1,1)] + set_options(symmetry="z22") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=(0,1)) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=(1,0)) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=(1,0)) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=(0,1)) - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z22(1,0), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z22(0,1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], 
tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=Z22(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z22(1,0), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=(0,0)) + bc = rand((5,4), [bond]*2, pattern="++", dq=(1,0)) + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -58,13 +58,13 @@ def z22setup(request): @pytest.fixture(scope='class') def u1setup(request): - bond1 = BondInfo({U1(0):3, U1(1): 3, U1(3):3, U1(2):3}) - bond2 = BondInfo({U1(0):5, U1(1): 5, U1(3):5, U1(2):5}) + bond = (0,1,2,3) + set_options(symmetry="u1") - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=U1(1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=U1(2), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=U1(-1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=U1(-2), pattern="+-+").to_flat() + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=-1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=-2) request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) @@ -72,8 +72,9 @@ def u1setup(request): request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=U1(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=U1(1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -83,21 +84,22 @@ def u1setup(request): @pytest.fixture(scope='class') def z4setup(request): - bond1 = BondInfo({Z4(0):3, Z4(1): 3, Z4(3):3, Z4(2):3}) - bond2 = BondInfo({Z4(0):5, Z4(1): 5, Z4(3):5, Z4(2):5}) + bond = (0,1,2,3) + set_options(symmetry="z4") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=1) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=2) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=0) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=1) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z4(1), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z4(2), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z4(0), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z4(1), pattern="+-+").to_flat() request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = FermionTensor(deg, 
inds=['d','e','g'], tags=["deg"]) request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=Z4(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z4(1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -107,20 +109,22 @@ def z4setup(request): @pytest.fixture(scope='class') def z2setup(request): - bond1 = BondInfo({Z2(0):3, Z2(1): 3}) - bond2 = BondInfo({Z2(0):5, Z2(1): 5}) - request.cls.abc = abc = rand((bond2, bond1, bond1), dq=Z2(0), pattern="+--").to_flat() - request.cls.bcd = bcd = rand((bond1, bond1, bond1), dq=Z2(1), pattern="++-").to_flat() - request.cls.ega = ega = rand((bond1, bond1, bond2), dq=Z2(1), pattern="+--").to_flat() - request.cls.deg = deg = rand((bond1, bond1, bond1), dq=Z2(0), pattern="+-+").to_flat() + bond = (0,1) + set_options(symmetry="z2") + request.cls.abc = abc = rand((4,2,3), [bond]*3, pattern="+--", dq=0) + request.cls.bcd = bcd = rand((2,3,5), [bond]*3, pattern="++-", dq=1) + request.cls.ega = ega = rand((3,6,4), [bond]*3, pattern="+--", dq=1) + request.cls.deg = deg = rand((5,3,6), [bond]*3, pattern="+-+", dq=0) + request.cls.Tabc = Tabc = FermionTensor(abc, inds=['a','b','c'], tags=["abc"]) request.cls.Tega = Tega = FermionTensor(ega, inds=['e','g','a'], tags=["ega"]) request.cls.Tbcd = Tbcd = FermionTensor(bcd, inds=['b','c','d'], tags=["bcd"]) request.cls.Tdeg = Tdeg = FermionTensor(deg, inds=['d','e','g'], tags=["deg"]) request.cls.tn = FermionTensorNetwork((Tabc, Tega, Tbcd, Tdeg)) - ab = rand((bond1, bond1), dq=Z2(0), pattern="+-").to_flat() - bc = rand((bond1, bond1), dq=Z2(1), pattern="++").to_flat() + ab = rand((2,5), [bond]*2, pattern="+-", dq=0) + bc = rand((5,4), [bond]*2, pattern="++", dq=1) + Tab = FermionTensor(ab, inds=['a','b'], tags=["ab"]) Tbc = FermionTensor(bc, inds=['b','c'], tags=["bc"]) Tab1 = FermionTensor(ab.dagger, inds=['b1','a'], tags=["ab1"]) @@ -128,6 +132,7 @@ def z2setup(request): request.cls.norm = FermionTensorNetwork((Tab, Tbc, Tbc1, Tab1)) yield + @pytest.mark.usefixtures('u11setup') class TestU11: def test_backend(self): From cb295ad7840359dfe5c3c3c87c006ac52c83e6fa Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 8 Jun 2021 11:17:44 -0700 Subject: [PATCH 56/61] adjust to new _inds_to_eq --- quimb/tensor/tensor_block.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quimb/tensor/tensor_block.py b/quimb/tensor/tensor_block.py index 58bd3daf..a516b9b3 100644 --- a/quimb/tensor/tensor_block.py +++ b/quimb/tensor/tensor_block.py @@ -38,7 +38,7 @@ def get_block_contraction_path_info(*tensors, **contract_opts): o_ix = tuple(_gen_output_inds(total_ix)) # possibly map indices into the range needed by opt-einsum - eq = _inds_to_eq(all_ix, i_ix, o_ix) + eq = _inds_to_eq(i_ix, o_ix) size_dict = dict() for T in tensors: From f1db686cc8ad02e96d7e6bf470747a75c1e032d4 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 8 Jun 2021 11:51:50 -0700 Subject: [PATCH 57/61] bugfix --- quimb/tensor/tensor_block.py | 3 ++- quimb/tensor/tensor_core.py | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/quimb/tensor/tensor_block.py b/quimb/tensor/tensor_block.py index a516b9b3..1287977e 100644 --- a/quimb/tensor/tensor_block.py +++ b/quimb/tensor/tensor_block.py @@ -394,7 +394,8 @@ def __or__(self, other): """ return 
BlockTensorNetwork((self, other), virtual=True) - + _EXTRA_PROPS = () + def draw(self, *args, **kwargs): """Plot a graph of this tensor and its indices. """ diff --git a/quimb/tensor/tensor_core.py b/quimb/tensor/tensor_core.py index e941cccc..a86cdcbb 100644 --- a/quimb/tensor/tensor_core.py +++ b/quimb/tensor/tensor_core.py @@ -1617,7 +1617,7 @@ def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): self._owners = dict() # Short circuit for copying Tensors - if isinstance(data, self.__class__): + if isinstance(data, Tensor): self._data = data.data self._inds = data.inds self._tags = data.tags.copy() @@ -2802,7 +2802,7 @@ class TensorNetwork(object): def __init__(self, ts, *, virtual=False, check_collisions=True): # short-circuit for copying TensorNetworks - if isinstance(ts, self.__class__): + if isinstance(ts, TensorNetwork): self.tag_map = valmap(lambda tids: tids.copy(), ts.tag_map) self.ind_map = valmap(lambda tids: tids.copy(), ts.ind_map) self.tensor_map = dict() @@ -2832,13 +2832,13 @@ def __and__(self, other): """Combine this tensor network with more tensors, without contracting. Copies the tensors. """ - return self.__class__((self, other)) + return TensorNetwork((self, other)) def __or__(self, other): """Combine this tensor network with more tensors, without contracting. Views the constituent tensors. """ - return self.__class__((self, other), virtual=True) + return TensorNetwork((self, other), virtual=True) @classmethod def from_TN(cls, tn, like=None, inplace=False, **kwargs): From 84e8942fada102804fe63977b83b7a947b2cc7f1 Mon Sep 17 00:00:00 2001 From: yangcal Date: Mon, 14 Jun 2021 14:43:55 -0700 Subject: [PATCH 58/61] bugfix in block_gen.gen_2d_bonds --- quimb/tensor/block_gen.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/quimb/tensor/block_gen.py b/quimb/tensor/block_gen.py index 15e126f2..44a32e0d 100644 --- a/quimb/tensor/block_gen.py +++ b/quimb/tensor/block_gen.py @@ -196,7 +196,7 @@ def gen_2d_bonds_z2(pnarray, physical_infos): for ix, iy in product(range(Lx), range(Ly)): nvir = (ix != Lx - 1) + (ix != 0) +\ (iy != Ly - 1) + (iy != 0) - symmetry_infos[ix,iy] = [(0,1)] * nvir + [tuple(physical_infos[ix][iy])] + symmetry_infos[ix,iy] = [(0,1)] * nvir + [tuple(physical_infos[ix,iy])] dq_infos[ix,iy]= pnarray[ix,iy] return symmetry_infos, dq_infos @@ -228,7 +228,7 @@ def gen_2d_bonds_z22(n1array, n2array, physical_infos): for ix, iy in product(range(Lx), range(Ly)): nvir = (ix != Lx - 1) + (ix != 0) +\ (iy != Ly - 1) + (iy != 0) - symmetry_infos[ix,iy] = [((0,0),(0,1),(1,0),(1,1))] * nvir + [tuple(physical_infos[ix][iy])] + symmetry_infos[ix,iy] = [((0,0),(0,1),(1,0),(1,1))] * nvir + [tuple(physical_infos[ix,iy])] dq_infos[ix,iy]= (n1array[ix,iy], n2array[ix,iy]) return symmetry_infos, dq_infos From 2dc7bb7504567ef793a5bfa78d93b04bbad37fe6 Mon Sep 17 00:00:00 2001 From: yangcal Date: Mon, 14 Jun 2021 14:44:20 -0700 Subject: [PATCH 59/61] typo fix in fermion_2d --- quimb/tensor/fermion_2d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 89322252..0c55037c 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -100,7 +100,7 @@ def reorder(self, direction, layer_tags=None, inplace=False): Parameters ---------- - direction : {"row", "column"} + direction : {"row", "col"} The direction to reorder the entire network layer_tags : optional The relative order within a single coordinate From f0fc03a30c2abb8f858b75f6228248114eedc9bf Mon Sep 17 
00:00:00 2001 From: yangcal Date: Tue, 15 Jun 2021 16:36:10 -0700 Subject: [PATCH 60/61] remove the reordering after environment construction --- quimb/tensor/fermion_2d.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/quimb/tensor/fermion_2d.py b/quimb/tensor/fermion_2d.py index 0c55037c..8e8d6ede 100644 --- a/quimb/tensor/fermion_2d.py +++ b/quimb/tensor/fermion_2d.py @@ -140,7 +140,6 @@ def reorder(self, direction, layer_tags=None, inplace=False): if tid not in tid_map: tid_map[tid] = current_position current_position += 1 - return self._reorder_from_tid(tid_map, inplace) def _contract_boundary_full_bond( @@ -319,13 +318,6 @@ def _compute_plaquette_environments_row_first( col_envs[i0]['right', j0 + y_bsz - 1] ), check_collisions=False) - ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) - tid_lst = [] - for ij in ij_tags: - tid_lst += list(env_ij._get_tids_from_tags(ij)) - position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) - reorder_map = {i:j for i, j in zip(tid_lst, position)} - env_ij._reorder_from_tid(reorder_map, inplace=True) plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij return plaquette_envs @@ -416,13 +408,6 @@ def _compute_plaquette_environments_col_first( row_envs[j0]['top', i0 + x_bsz - 1] ), check_collisions=False) - ij_tags = (self.site_tag(i0 +ix, j0 + iy) for ix in range(x_bsz) for iy in range(y_bsz)) - tid_lst = [] - for ij in ij_tags: - tid_lst += list(env_ij._get_tids_from_tags(ij)) - position = range(len(env_ij.tensor_map)-len(tid_lst), len(env_ij.tensor_map)) - reorder_map = {i:j for i, j in zip(tid_lst, position)} - env_ij._reorder_from_tid(reorder_map, inplace=True) plaquette_envs[(i0, j0), (x_bsz, y_bsz)] = env_ij return plaquette_envs From 0a4b884065c6f2e1646c811a5c8cb79c2af206e8 Mon Sep 17 00:00:00 2001 From: yangcal Date: Tue, 15 Jun 2021 16:37:00 -0700 Subject: [PATCH 61/61] implement optimized reorder_all method, enable fermion_path attr --- quimb/tensor/fermion.py | 213 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 193 insertions(+), 20 deletions(-) diff --git a/quimb/tensor/fermion.py b/quimb/tensor/fermion.py index aabb191a..e6d1e9a3 100644 --- a/quimb/tensor/fermion.py +++ b/quimb/tensor/fermion.py @@ -201,8 +201,18 @@ def get_full_info(self, tid_or_site): T, site = self.tensor_order[tid_or_site] return T, tid, site + def get_ind_map(self): + ind_map = dict() + for tid, (T, _) in self.tensor_order.items(): + for ind in T.inds: + if ind not in ind_map: + ind_map[ind] = oset([tid]) + else: + ind_map[ind] = ind_map[ind].union(oset([tid])) + return ind_map + def _reorder_from_dict(self, tid_map): - """ Reorder tensors from a tensor_id/position mapping. + """ inplace reordering of tensors from a tensor_id/position mapping. Pizorn algorithm will be applied during moving Parameters @@ -210,14 +220,102 @@ def _reorder_from_dict(self, tid_map): tid_map: dictionary Mapping of tensor id to the desired location """ + if len(tid_map) == len(self.tensor_order): + self.reorder_all_(tid_map) + else: + tid_lst = list(tid_map.keys()) + des_sites = list(tid_map.values()) + # sort the destination sites to avoid cross-overs during moving + work_des_sites = sorted(des_sites)[::-1] + for isite in work_des_sites: + ind = des_sites.index(isite) + self.move(tid_lst[ind], isite) + + def reorder_all(self, tid_map, ind_map=None, inplace=False): + """ reordering of tensors from a tensor_id/position mapping. The tid_map + must contain the mapping for all tensors in this FermionSpace. 
+ Pizorn algorithm will be applied during moving. + + Parameters + ---------- + tid_map: dictionary + Mapping of tensor id to the desired location + ind_map: dictionary, optional + Mapping of tensor index to the tensor id + inplace: bool, optional + Whether the reordering operation is inplace or not + """ + fs = self if inplace else self.copy() + # Compute Global Phase + if len(tid_map) != len(fs.tensor_order): + raise ValueError("tid_map must be of equal size as the FermionSpace") + nsites = len(fs.tensor_order) + parity_tab = [] + input_tab = [] + free_tids = [] + for isite in range(nsites): + tid = fs.get_tid_from_site(isite) + T = fs.tensor_order[tid][0] + if not T.avoid_phase: + free_tids.append(tid) + parity_tab.append(T.parity) + input_tab.append(tid) tid_lst = list(tid_map.keys()) des_sites = list(tid_map.values()) - # sort the destination sites to avoid cross-overs during moving - work_des_sites = sorted(des_sites)[::-1] - for isite in work_des_sites: - ind = des_sites.index(isite) - self.move(tid_lst[ind], isite) + global_parity = 0 + for fsite in range(nsites-1, -1, -1): + idx = des_sites.index(fsite) + tid = tid_lst[idx] + isite = input_tab.index(tid) + if isite == fsite: continue + global_parity += np.sum(parity_tab[isite+1:fsite+1]) * parity_tab[isite] + parity_tab[isite:fsite+1] = parity_tab[isite+1:fsite+1]+[parity_tab[isite]] + input_tab[isite:fsite+1] = input_tab[isite+1:fsite+1]+[input_tab[isite]] + + _global_flip = (int(global_parity) % 2 == 1) + if _global_flip: + if len(free_tids) ==0: + raise ValueError("Global flip required on one tensor but all tensors are marked to avoid phase") + T = fs.tensor_order[free_tids[0]][0] + T.flip_(global_flip=_global_flip) + + # Compute Local Phase + if ind_map is None: + ind_map = fs.get_ind_map() + else: + ind_map = ind_map.copy() + + local_flip_info = dict() + for tid1, fsite1 in tid_map.items(): + T1, isite1 = fs.tensor_order[tid1] + for ind in T1.inds: + tids = ind_map.pop(ind, []) + if len(tids) <2: + continue + tid2, = tids - oset([tid1]) + T2, isite2 = fs.tensor_order[tid2] + fsite2 = tid_map[tid2] + if (isite1-isite2) * (fsite1-fsite2) < 0: + if T1.avoid_phase and T2.avoid_phase: + raise ValueError("relative order for %s and %s changed, local phase can not be avoided"%(tid1, tid2)) + else: + marked_tid = tid2 if T1.avoid_phase else tid1 + if marked_tid not in local_flip_info: + local_flip_info[marked_tid] = [ind] + else: + local_flip_info[marked_tid].append(ind) + + for tid, inds in local_flip_info.items(): + T = fs.tensor_order[tid][0] + T.flip_(local_inds=inds) + + for tid, fsite in tid_map.items(): + T = fs.tensor_order[tid][0] + fs.tensor_order.update({tid: (T, fsite)}) + return fs + + reorder_all_ = functools.partialmethod(reorder_all, inplace=True) def __setitem__(self, site, tsr): if site in self.sites: @@ -238,6 +336,7 @@ def move(self, tid_or_site, des_site): """ tsr, tid, site = self.get_full_info(tid_or_site) + avoid_phase = tsr.avoid_phase if site == des_site: return move_left = (des_site < site) iterator = range(des_site, site) if move_left else range(site+1, des_site+1) @@ -246,23 +345,27 @@ def move(self, tid_or_site, des_site): parity = 0 for itid in tid_lst: itsr, isite = self.tensor_order[itid] - parity += itsr.parity - shared_inds += list(oset(itsr.inds) & oset(tsr.inds)) + i_shared_inds = list(oset(itsr.inds) & oset(tsr.inds)) + if avoid_phase: + global_flip = (tsr.parity * itsr.parity == 1) + if len(i_shared_inds)>0 or global_flip: + if itsr.avoid_phase: + raise ValueError("Two tensors marked to avoid 
phase") + itsr.flip_(global_flip=global_flip, local_inds=i_shared_inds) + else: + shared_inds += i_shared_inds + parity += itsr.parity + if move_left: self.tensor_order[itid] = (itsr, isite+1) else: self.tensor_order[itid] = (itsr, isite-1) - global_parity = (parity % 2) * tsr.data.parity - axes = [tsr.inds.index(i) for i in shared_inds] - if global_parity == 0 and len(axes) ==0: - new_data = tsr.data - else: - new_data = tsr.data.copy() - if global_parity !=0: - new_data._global_flip() - if len(axes)>0: - new_data._local_flip(axes) - tsr.modify(data=new_data) + + if not avoid_phase: + global_parity = (parity % 2) * tsr.data.parity + global_flip = (global_parity == 1) + tsr.flip_(global_flip=global_flip, local_inds=shared_inds) + self.tensor_order[tid] = (tsr, des_site) def move_past(self, tsr, site_range=None): @@ -483,18 +586,61 @@ def _fetch_fermion_space(*tensors, inplace=True): class FermionTensor(BlockTensor): - __slots__ = ('_data', '_inds', '_tags', '_left_inds', '_owners', '_fermion_owner') + __slots__ = ('_data', '_inds', '_tags', '_left_inds', + '_owners', '_fermion_owner', '_avoid_phase', + '_fermion_path') def __init__(self, data=1.0, inds=(), tags=None, left_inds=None): # a new or copied Tensor always has no owners self._fermion_owner = None BlockTensor.__init__(self, data=data, inds=inds, tags=tags, left_inds=left_inds) + if isinstance(data, FermionTensor): + self._data = data.data.copy() + self._avoid_phase = data._avoid_phase + self._fermion_path = data._fermion_path.copy() + else: + self._avoid_phase = False + self._fermion_path = dict() @property def symmetry(self): return self.data.dq.__name__ + @property + def net_symmetry(self): + return self.data.dq + + @property + def avoid_phase(self): + return self._avoid_phase + + @property + def fermion_path(self): + return self._fermion_path + + @avoid_phase.setter + def avoid_phase(self, avoid_phase): + self._avoid_phase = avoid_phase + + @fermion_path.setter + def fermion_path(self, fermion_path): + self._fermion_path = fermion_path + + def set_fermion_path(self, global_flip=False, local_inds=None): + if local_inds is None: + local_inds = [] + _global_flip = self.fermion_path.pop("global_flip", False) + _local_inds = self.fermion_path.pop("local_inds", []) + self._fermion_path["global_flip"] = _global_flip ^ global_flip + all_inds = tuple(local_inds) + tuple(_local_inds) + updated_local_inds = [] + for ind in all_inds: + count = all_inds.count(ind) + if count %2 ==1: + updated_local_inds.append(ind) + self._fermion_path["local_inds"] = updated_local_inds + @property def fermion_owner(self): return self._fermion_owner @@ -503,12 +649,39 @@ def fermion_owner(self): def parity(self): return self.data.parity + def modify(self, **kwargs): + if "inds" in kwargs and "data" not in kwargs: + inds = kwargs.get("inds") + local_inds = self.fermion_path.pop("local_inds", []) + new_local_inds = [] + for ind in local_inds: + if ind in self.inds: + new_ind = inds[self.inds.index(ind)] + new_local_inds.append(new_ind) + self._fermion_path["local_inds"] = new_local_inds + + super().modify(**kwargs) + + def flip(self, global_flip=False, local_inds=None, inplace=False): + T = self if inplace else self.copy() + T.set_fermion_path(global_flip=global_flip, local_inds=local_inds) + if global_flip: + T.data._global_flip() + if local_inds is not None and len(local_inds)>0: + axes = [T.inds.index(ind) for ind in local_inds] + T.data._local_flip(axes) + return T + + flip_ = functools.partialmethod(flip, inplace=True) + def copy(self, deep=False): 
"""Copy this tensor. Note by default (``deep=False``), the underlying array will *not* be copied. The fermion owner will to reset to None """ if deep: t = copy.deepcopy(self) + t.avoid_phase = self.avoid_phase + t.fermion_path = self.fermion_path.copy() t.remove_fermion_owner() else: t = self.__class__(self, None)