Commit 428ce33
2 parents ddf3c92 + 63f9e8b
hczhai committed Jun 13, 2023
Showing 12 changed files with 47 additions and 47 deletions.
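The diff below is mechanical: every `.flatten()` call on an array is replaced with `.ravel()`. The commit message is not shown here, so the motivation is inferred, but the standard reason for this swap is that NumPy's `ndarray.flatten()` always allocates a copy, while `ndarray.ravel()` returns a view whenever the array is already contiguous and only copies when it must. Since nearly every call site below immediately consumes the flattened data (via `np.concatenate`, a slice assignment, or a block list), the view suffices and the copy is wasted work. A minimal sketch of the semantic difference, assuming plain NumPy:

    import numpy as np

    a = np.arange(6).reshape(2, 3)

    f = a.flatten()   # always a fresh copy
    r = a.ravel()     # a view here, since a is C-contiguous

    f[0] = -1
    assert a[0, 0] == 0    # the copy shields a

    r[0] = -1
    assert a[0, 0] == -1   # the view aliases a's buffer

    # For non-contiguous input, ravel() silently falls back to a copy,
    # so read-only callers get the faster path with the same semantics.
    t = a.T.ravel()
    t[0] = 99
    assert a[0, 0] == -1   # a.T is non-contiguous; ravel() copied here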
4 changes: 2 additions & 2 deletions pyblock3/algebra/ad/flat.py
@@ -200,7 +200,7 @@ def from_sparse(spt):
idxs[1:] = np.cumsum(shapes.prod(axis=1), dtype=np.uint64)
data = np.zeros((idxs[-1], ), dtype=spt.dtype)
for i in range(n_blocks):
- data[idxs[i]:idxs[i + 1]] = spt.blocks[i].flatten()
+ data[idxs[i]:idxs[i + 1]] = spt.blocks[i].ravel()
return FlatSparseTensor(q_labels, shapes, data, idxs)

@staticmethod
@@ -717,7 +717,7 @@ def _diag(v):
return v.__class__(
q_labels=np.repeat(v.q_labels, 2, axis=1),
shapes=np.repeat(v.shapes, 2, axis=1),
- data=jnp.concatenate([jnp.diag(v.data[i:j]).flatten() for i, j in zip(v.idxs, v.idxs[1:])]))
+ data=jnp.concatenate([jnp.diag(v.data[i:j]).ravel() for i, j in zip(v.idxs, v.idxs[1:])]))
elif v.n_blocks != 0:
raise RuntimeError("ndim for np.diag must be 1 or 2.")
else:
36 changes: 18 additions & 18 deletions pyblock3/algebra/ad/flat_impl.py
@@ -68,9 +68,9 @@ def flat_sparse_tensordot(aqs, ashs, adata, aidxs, bqs, bshs, bdata, bidxs, idxa
idxs.append(idxs[-1] + mat.size)
qs.append(outq)
shapes.append(mat.shape)
- mats.append(mat.flatten())
+ mats.append(mat.ravel())
else:
- mats[blocks_map[outqk]] += mat.flatten()
+ mats[blocks_map[outqk]] += mat.ravel()

return (np.array(qs, dtype=np.uint32),
np.array(shapes, dtype=np.uint32),
@@ -150,7 +150,7 @@ def flat_sparse_tensor_svd(aqs, ashs, adata, aidxs, idx, linfo, rinfo, pattern):
nk = np.multiply.reduce(sh)
qq = np.array([x.to_flat() for x in qs + (q, )], dtype=aqs.dtype)
sh = np.array(sh + (l.shape[-1], ), dtype=ashs.dtype)
- l_blocks.append((qq, sh, l[k:k + nk, :].flatten()))
+ l_blocks.append((qq, sh, l[k:k + nk, :].ravel()))
pqs = qs
pqs = None
for qs in items[q][1]:
@@ -160,7 +160,7 @@ def flat_sparse_tensor_svd(aqs, ashs, adata, aidxs, idx, linfo, rinfo, pattern):
nk = np.multiply.reduce(sh)
qq = np.array([x.to_flat() for x in (q, ) + qs], dtype=aqs.dtype)
sh = np.array((r.shape[0], ) + sh, dtype=ashs.dtype)
- r_blocks.append((qq, sh, r[:, k:k + nk].flatten()))
+ r_blocks.append((qq, sh, r[:, k:k + nk].ravel()))
pqs = qs
lqs = np.array([xl[0] for xl in l_blocks], dtype=aqs.dtype)
lshs = np.array([xl[1] for xl in l_blocks], dtype=ashs.dtype)
@@ -203,7 +203,7 @@ def flat_sparse_left_svd(aqs, ashs, adata, aidxs, indexed=False):
rqs[ir, :] = qq
rshs[ir] = r.shape
ridxs[ir + 1] = ridxs[ir] + r.size
- rmats[ir] = r.flatten()
+ rmats[ir] = r.ravel()
sqs[ir, 0] = qq
sshs[ir, 0] = s.shape[0]
sidxs[ir + 1] = sidxs[ir] + s.shape[0]
@@ -213,7 +213,7 @@ def flat_sparse_left_svd(aqs, ashs, adata, aidxs, indexed=False):
lshs[v, -1] = l.shape[-1]
lidx[ill:ill + len(v)] = v
for q, ia in zip(ls, v):
- lmats[ia] = q.flatten()
+ lmats[ia] = q.ravel()
ill += 1
assert ill == nblocks_l
rr = lqs[lidx], lshs[lidx], jnp.concatenate([lmats[x] for x in lidx]), None, \
@@ -253,7 +253,7 @@ def flat_sparse_right_svd(aqs, ashs, adata, aidxs, indexed=False):
lqs[il, :] = qq
lshs[il] = l.shape
lidxs[il + 1] = lidxs[il] + l.size
- lmats[il] = l.flatten()
+ lmats[il] = l.ravel()
sqs[il, 0] = qq
sshs[il, 0] = s.shape[0]
sidxs[il + 1] = sidxs[il] + s.shape[0]
@@ -263,7 +263,7 @@ def flat_sparse_right_svd(aqs, ashs, adata, aidxs, indexed=False):
rshs[v, 0] = r.shape[0]
ridx[irr:irr + len(v)] = v
for q, ia in zip(rs, v):
- rmats[ia] = q.flatten()
+ rmats[ia] = q.ravel()
irr += 1
assert irr == nblocks_r
rr = lqs, lshs, jnp.concatenate(lmats), lidxs, \
@@ -296,12 +296,12 @@ def flat_sparse_left_canonicalize(aqs, ashs, adata, aidxs):
rqs[ir, :] = qq
rshs[ir] = r.shape
ridxs[ir + 1] = ridxs[ir] + r.size
- rmats[ir] = r.flatten()
+ rmats[ir] = r.ravel()
qs = np.split(q, list(accumulate(l_shapes[:-1])), axis=0)
assert len(qs) == len(v)
qshs[v, -1] = r.shape[0]
for q, ia in zip(qs, v):
- qmats[ia] = q.flatten()
+ qmats[ia] = q.ravel()
return qqs, qshs, jnp.concatenate(qmats), None, rqs, rshs, jnp.concatenate(rmats), ridxs


@@ -329,12 +329,12 @@ def flat_sparse_right_canonicalize(aqs, ashs, adata, aidxs):
lqs[il, :] = qq
lshs[il] = r.shape[::-1]
lidxs[il + 1] = lidxs[il] + r.size
- lmats[il] = r.T.flatten()
+ lmats[il] = r.T.ravel()
qs = np.split(q, list(accumulate(r_shapes[:-1])), axis=0)
assert len(qs) == len(v)
qshs[v, 0] = r.shape[0]
for q, ia in zip(qs, v):
- qmats[ia] = q.T.flatten()
+ qmats[ia] = q.T.ravel()
return lqs, lshs, jnp.concatenate(lmats), lidxs, qqs, qshs, jnp.concatenate(qmats), None


@@ -356,7 +356,7 @@ def flat_sparse_right_canonicalize_indexed(aqs, ashs, adata, aidxs):

def flat_sparse_transpose(aqs, ashs, adata, aidxs, axes):
data = jnp.concatenate(
- [jnp.transpose(adata[i:j].reshape(sh), axes=axes).flatten()
+ [jnp.transpose(adata[i:j].reshape(sh), axes=axes).ravel()
for i, j, sh in zip(aidxs, aidxs[1:], ashs)])
return (aqs[:, axes], ashs[:, axes], data, aidxs)

@@ -456,7 +456,7 @@ def flat_sparse_fuse(aqs, ashs, adata, aidxs, idxs, info, pattern):
blocks_map[zrq] = (blocks_map[zrq][0], blocks_map[zrq][1],
rdata.at[sl].set(adata[aidxs[ia]:aidxs[ia + 1]].reshape(rsh)))
rqs, rshs, rdata = zip(*blocks_map.values())
- return np.array(rqs, dtype=np.uint32), np.array(rshs, dtype=np.uint32), jnp.concatenate([d.flatten() for d in rdata]), None
+ return np.array(rqs, dtype=np.uint32), np.array(rshs, dtype=np.uint32), jnp.concatenate([d.ravel() for d in rdata]), None


def flat_sparse_trans_fusing_info(info):
@@ -527,7 +527,7 @@ def flat_sparse_kron_add(aqs, ashs, adata, aidxs, bqs, bshs, bdata, bidxs, infol
mat[: ashs[ia, 0], ..., : ashs[ia, -1]]
+ adata[aidxs[ia]:aidxs[ia + 1]].reshape(ashs[ia])
)
- cdata = cdata.at[cidxs[ic]:cidxs[ic + 1]].set(xmat.flatten())
+ cdata = cdata.at[cidxs[ic]:cidxs[ic + 1]].set(xmat.ravel())

# copy b blocks to smaller index in new block
for ib, q in xbqs:
@@ -540,7 +540,7 @@ def flat_sparse_kron_add(aqs, ashs, adata, aidxs, bqs, bshs, bdata, bidxs, infol
mat[-int(bshs[ib, 0]):, ..., -int(bshs[ib, -1]):]
+ bdata[bidxs[ib]:bidxs[ib + 1]].reshape(bshs[ib])
)
- cdata = cdata.at[cidxs[ic]:cidxs[ic + 1]].set(xmat.flatten())
+ cdata = cdata.at[cidxs[ic]:cidxs[ic + 1]].set(xmat.ravel())

return cqs, cshs, cdata, cidxs

@@ -601,7 +601,7 @@ def flat_sparse_truncate_svd(lqs, lshs, ldata, lidxs, sqs, sshs, sdata, sidxs,
sh = lshs[ikl:ikl + nkl].copy()
sh[:, -1] = ng
dt = ldata[lidxs[ikl]:lidxs[ikl + nkl]
- ].reshape((-1, ns))[:, gl].flatten()
+ ].reshape((-1, ns))[:, gl].ravel()
l_blocks.append((lqs[ikl:ikl + nkl], sh, dt,
np.arange(ikl, ikl + nkl, dtype=int)))
nsshs[iks, 0] = ng
@@ -618,7 +618,7 @@ def flat_sparse_truncate_svd(lqs, lshs, ldata, lidxs, sqs, sshs, sdata, sidxs,
[rdata[irst:ired].reshape((ns, -1))
for irst, ired in zip(rrx[:-1], rrx[1:])], axis=1)[gl, :]
dt = np.concatenate(
- [dt[:, irst:ired].flatten() for irst, ired in zip(rrxr[:-1], rrxr[1:])])
+ [dt[:, irst:ired].ravel() for irst, ired in zip(rrxr[:-1], rrxr[1:])])
r_blocks.append((rqs[ikr:ikr + nkr], sh, dt,
np.arange(ikr, ikr + nkr, dtype=int)))
if eigen_values:
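A note on the two `ad/` files above: they build on `jax.numpy` (`jnp`) rather than NumPy. JAX arrays are immutable, so there is no view-versus-copy distinction to exploit; both `.flatten()` and `.ravel()` return functionally fresh arrays, and XLA decides whether a physical copy happens. The change in those files is therefore presumably for consistency with the NumPy code paths rather than for speed. A small sketch, assuming JAX is installed:

    import jax.numpy as jnp

    x = jnp.arange(6).reshape(2, 3)
    # Immutable arrays: neither call can be used to mutate x in place,
    # and both yield the same 1-D result.
    assert (x.ravel() == x.flatten()).all()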
2 changes: 1 addition & 1 deletion pyblock3/algebra/core.py
@@ -387,7 +387,7 @@ def density(self):

@property
def dtype(self):
- for v in self.flatten():
+ for v in self.ravel():
if not isinstance(v, int) or v != 0:
return v.dtype
return float
2 changes: 1 addition & 1 deletion pyblock3/algebra/fermion.py
@@ -1792,7 +1792,7 @@ def from_sparse(spt):
idxs[1:] = np.cumsum(shapes.prod(axis=1), dtype=INDEX_DTYPE)
data = np.zeros((idxs[-1], ), dtype=spt.dtype)
for i in range(n_blocks):
- data[idxs[i]:idxs[i + 1]] = spt.blocks[i].flatten()
+ data[idxs[i]:idxs[i + 1]] = spt.blocks[i].ravel()
return FlatFermionTensor(q_labels, shapes, data, spt.pattern, idxs, symmetry=cls, shape=spt.shape)

@staticmethod
4 changes: 2 additions & 2 deletions pyblock3/algebra/flat.py
@@ -188,7 +188,7 @@ def from_sparse(spt):
idxs[1:] = np.cumsum(shapes.prod(axis=1), dtype=np.uint64)
data = np.zeros((idxs[-1], ), dtype=spt.dtype)
for i in range(n_blocks):
- data[idxs[i]:idxs[i + 1]] = spt.blocks[i].flatten()
+ data[idxs[i]:idxs[i + 1]] = spt.blocks[i].ravel()
return FlatSparseTensor(q_labels, shapes, data, idxs)

def __str__(self):
@@ -710,7 +710,7 @@ def _diag(v):
return v.__class__(
q_labels=np.repeat(v.q_labels, 2, axis=1),
shapes=np.repeat(v.shapes, 2, axis=1),
- data=np.concatenate([np.diag(v.data[i:j]).flatten() for i, j in zip(v.idxs, v.idxs[1:])]))
+ data=np.concatenate([np.diag(v.data[i:j]).ravel() for i, j in zip(v.idxs, v.idxs[1:])]))
elif v.n_blocks != 0:
raise RuntimeError("ndim for np.diag must be 1 or 2.")
else:
32 changes: 16 additions & 16 deletions pyblock3/algebra/impl/flat.py
@@ -67,9 +67,9 @@ def flat_sparse_tensordot(aqs, ashs, adata, aidxs, bqs, bshs, bdata, bidxs, idxa
idxs.append(idxs[-1] + mat.size)
qs.append(outq)
shapes.append(mat.shape)
- mats.append(mat.flatten())
+ mats.append(mat.ravel())
else:
- mats[blocks_map[outqk]] += mat.flatten()
+ mats[blocks_map[outqk]] += mat.ravel()

return (np.array(qs, dtype=np.uint32),
np.array(shapes, dtype=np.uint32),
@@ -149,7 +149,7 @@ def flat_sparse_tensor_svd(aqs, ashs, adata, aidxs, idx, linfo, rinfo, pattern):
nk = np.multiply.reduce(sh)
qq = np.array([x.to_flat() for x in qs + (q, )], dtype=aqs.dtype)
sh = np.array(sh + (l.shape[-1], ), dtype=ashs.dtype)
- l_blocks.append((qq, sh, l[k:k + nk, :].flatten()))
+ l_blocks.append((qq, sh, l[k:k + nk, :].ravel()))
pqs = qs
pqs = None
for qs in items[q][1]:
@@ -159,7 +159,7 @@ def flat_sparse_tensor_svd(aqs, ashs, adata, aidxs, idx, linfo, rinfo, pattern):
nk = np.multiply.reduce(sh)
qq = np.array([x.to_flat() for x in (q, ) + qs], dtype=aqs.dtype)
sh = np.array((r.shape[0], ) + sh, dtype=ashs.dtype)
- r_blocks.append((qq, sh, r[:, k:k + nk].flatten()))
+ r_blocks.append((qq, sh, r[:, k:k + nk].ravel()))
pqs = qs
lqs = np.array([xl[0] for xl in l_blocks], dtype=aqs.dtype)
lshs = np.array([xl[1] for xl in l_blocks], dtype=ashs.dtype)
@@ -202,7 +202,7 @@ def flat_sparse_left_svd(aqs, ashs, adata, aidxs, indexed=False):
rqs[ir, :] = qq
rshs[ir] = r.shape
ridxs[ir + 1] = ridxs[ir] + r.size
- rmats[ir] = r.flatten()
+ rmats[ir] = r.ravel()
sqs[ir, 0] = qq
sshs[ir, 0] = s.shape[0]
sidxs[ir + 1] = sidxs[ir] + s.shape[0]
@@ -212,7 +212,7 @@ def flat_sparse_left_svd(aqs, ashs, adata, aidxs, indexed=False):
lshs[v, -1] = l.shape[-1]
lidx[ill:ill + len(v)] = v
for q, ia in zip(ls, v):
- lmats[ia] = q.flatten()
+ lmats[ia] = q.ravel()
ill += 1
assert ill == nblocks_l
rr = lqs[lidx], lshs[lidx], np.concatenate([lmats[x] for x in lidx]), None, \
@@ -252,7 +252,7 @@ def flat_sparse_right_svd(aqs, ashs, adata, aidxs, indexed=False):
lqs[il, :] = qq
lshs[il] = l.shape
lidxs[il + 1] = lidxs[il] + l.size
- lmats[il] = l.flatten()
+ lmats[il] = l.ravel()
sqs[il, 0] = qq
sshs[il, 0] = s.shape[0]
sidxs[il + 1] = sidxs[il] + s.shape[0]
@@ -262,7 +262,7 @@ def flat_sparse_right_svd(aqs, ashs, adata, aidxs, indexed=False):
rshs[v, 0] = r.shape[0]
ridx[irr:irr + len(v)] = v
for q, ia in zip(rs, v):
- rmats[ia] = q.flatten()
+ rmats[ia] = q.ravel()
irr += 1
assert irr == nblocks_r
rr = lqs, lshs, np.concatenate(lmats), lidxs, \
@@ -295,12 +295,12 @@ def flat_sparse_left_canonicalize(aqs, ashs, adata, aidxs):
rqs[ir, :] = qq
rshs[ir] = r.shape
ridxs[ir + 1] = ridxs[ir] + r.size
- rmats[ir] = r.flatten()
+ rmats[ir] = r.ravel()
qs = np.split(q, list(accumulate(l_shapes[:-1])), axis=0)
assert len(qs) == len(v)
qshs[v, -1] = r.shape[0]
for q, ia in zip(qs, v):
- qmats[ia] = q.flatten()
+ qmats[ia] = q.ravel()
return qqs, qshs, np.concatenate(qmats), None, rqs, rshs, np.concatenate(rmats), ridxs


@@ -328,12 +328,12 @@ def flat_sparse_right_canonicalize(aqs, ashs, adata, aidxs):
lqs[il, :] = qq
lshs[il] = r.shape[::-1]
lidxs[il + 1] = lidxs[il] + r.size
- lmats[il] = r.T.flatten()
+ lmats[il] = r.T.ravel()
qs = np.split(q, list(accumulate(r_shapes[:-1])), axis=0)
assert len(qs) == len(v)
qshs[v, 0] = r.shape[0]
for q, ia in zip(qs, v):
- qmats[ia] = q.T.flatten()
+ qmats[ia] = q.T.ravel()
return lqs, lshs, np.concatenate(lmats), lidxs, qqs, qshs, np.concatenate(qmats), None


@@ -355,7 +355,7 @@ def flat_sparse_right_canonicalize_indexed(aqs, ashs, adata, aidxs):

def flat_sparse_transpose(aqs, ashs, adata, aidxs, axes):
data = np.concatenate(
- [np.transpose(adata[i:j].reshape(sh), axes=axes).flatten()
+ [np.transpose(adata[i:j].reshape(sh), axes=axes).ravel()
for i, j, sh in zip(aidxs, aidxs[1:], ashs)])
return (aqs[:, axes], ashs[:, axes], data, aidxs)

@@ -453,7 +453,7 @@ def flat_sparse_fuse(aqs, ashs, adata, aidxs, idxs, info, pattern):
k, k + nk) for ix in range(len(rq)))
rdata[sl] = adata[aidxs[ia]:aidxs[ia + 1]].reshape(rsh)
rqs, rshs, rdata = zip(*blocks_map.values())
- return np.array(rqs, dtype=np.uint32), np.array(rshs, dtype=np.uint32), np.concatenate([d.flatten() for d in rdata]), None
+ return np.array(rqs, dtype=np.uint32), np.array(rshs, dtype=np.uint32), np.concatenate([d.ravel() for d in rdata]), None


def flat_sparse_trans_fusing_info(info):
@@ -588,7 +588,7 @@ def flat_sparse_truncate_svd(lqs, lshs, ldata, lidxs, sqs, sshs, sdata, sidxs,
sh = lshs[ikl:ikl + nkl].copy()
sh[:, -1] = ng
dt = ldata[lidxs[ikl]:lidxs[ikl + nkl]
- ].reshape((-1, ns))[:, gl].flatten()
+ ].reshape((-1, ns))[:, gl].ravel()
l_blocks.append((lqs[ikl:ikl + nkl], sh, dt,
np.arange(ikl, ikl + nkl, dtype=int)))
nsshs[iks, 0] = ng
@@ -605,7 +605,7 @@ def flat_sparse_truncate_svd(lqs, lshs, ldata, lidxs, sqs, sshs, sdata, sidxs,
[rdata[irst:ired].reshape((ns, -1))
for irst, ired in zip(rrx[:-1], rrx[1:])], axis=1)[gl, :]
dt = np.concatenate(
- [dt[:, irst:ired].flatten() for irst, ired in zip(rrxr[:-1], rrxr[1:])])
+ [dt[:, irst:ired].ravel() for irst, ired in zip(rrxr[:-1], rrxr[1:])])
r_blocks.append((rqs[ikr:ikr + nkr], sh, dt,
np.arange(ikr, ikr + nkr, dtype=int)))
if eigen_values:
2 changes: 1 addition & 1 deletion pyblock3/block2/hamil.py
@@ -155,7 +155,7 @@ def hchain(n_sites, r=1.8, pg_reorder=True, **kwargs):
assert abs(h1e[i, j] - h1e[j, i]) < tol
mh1e[k] = h1e[i, j]
k += 1
- mg2e = g2e.flatten().copy()
+ mg2e = g2e.ravel().copy()
mh1e[np.abs(mh1e) < tol] = 0.0
mg2e[np.abs(mg2e) < tol] = 0.0
fcidump.initialize_su2(n_sites, n_elec, 0, 1, ecore, mh1e, mg2e)
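One call site worth flagging: in `hchain` the result is `g2e.ravel().copy()`, and the trailing `.copy()` is now load-bearing. With `flatten()` it was redundant (flatten already copies); with `ravel()` it guarantees `mg2e` owns its buffer before the in-place thresholding `mg2e[np.abs(mg2e) < tol] = 0.0`, so the original `g2e` integrals are not clobbered. An illustration with a stand-in array (the real `g2e` holds two-electron integrals):

    import numpy as np

    g2e = np.random.rand(4, 4)       # stand-in for the real integrals
    mg2e = g2e.ravel().copy()        # ravel() alone would be a view into g2e
    mg2e[np.abs(mg2e) < 0.5] = 0.0   # in-place edit must not touch g2e
    assert not np.shares_memory(mg2e, g2e)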
4 changes: 2 additions & 2 deletions pyblock3/block2/io.py
@@ -338,7 +338,7 @@ def to_block2(mpo, use_complex=False):
ql, qr = block.q_labels
ql, qr = SZ(ql.n, ql.twos, ql.pg), SZ(qr.n, qr.twos, qr.pg)
iq = xmat.info.find_state(dq.combine(ql, qr))
- xmat[iq] = np.asarray(block).flatten()
+ xmat[iq] = np.asarray(block).ravel()
tensors[i].ops[xexpr] = xmat
if iop not in tensors[i].ops:
if idq not in site_op_infos[i]:
@@ -349,7 +349,7 @@ def to_block2(mpo, use_complex=False):
xmat = bx.SparseMatrix(dalloc)
xmat.allocate(minfo)
for ix in range(0, minfo.n):
- xmat[ix] = np.identity(minfo.n_states_ket[ix]).flatten()
+ xmat[ix] = np.identity(minfo.n_states_ket[ix]).ravel()
tensors[i].ops[iop] = xmat
lopd = [SymbolicMPOTools.tr_expr_to_block2(expr, bx) for expr in ts.lop.data]
ropd = [SymbolicMPOTools.tr_expr_to_block2(expr, bx) for expr in ts.rop.data]
2 changes: 1 addition & 1 deletion pyblock3/gaussian/core.py
@@ -8,7 +8,7 @@ def pack(x):
r = np.zeros(size)
iz = 0
for z in x:
- r[iz:iz + z.nelement()] = z.detach().numpy().flatten()
+ r[iz:iz + z.nelement()] = z.detach().numpy().ravel()
iz += z.nelement()
assert iz == r.size
return r
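In `pack` above, the source arrays come from torch tensors via `.detach().numpy()`, which already shares memory with the tensor on CPU; `ravel()` keeps sharing for contiguous tensors, so the only copy left is the slice assignment into the output buffer. A hedged sketch of that pattern (not the full `pack`, which the diff shows only in part):

    import numpy as np
    import torch

    z = torch.randn(3, 4)
    buf = np.zeros(z.nelement())
    # .numpy() shares memory with z; .ravel() stays a view for a
    # contiguous tensor, so only the assignment below copies.
    buf[:] = z.detach().numpy().ravel()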
2 changes: 1 addition & 1 deletion pyblock3/symbolic/symbolic.py
@@ -44,7 +44,7 @@ def get_symbols(self):
return set([abs(op) for op in self if op != 0])

def __iter__(self):
- yield from self.data.flatten()
+ yield from self.data.ravel()

def __len__(self):
return self.data.size
2 changes: 1 addition & 1 deletion tests/tensor_network/03-gaussian/05-mera-2d-4x4-ghf.py
@@ -64,7 +64,7 @@ def ghf_make_rdm2():
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]]
- ).flatten()
+ ).ravel()

dm0b = ~dm0a & 1

2 changes: 1 addition & 1 deletion tests/tensor_network/03-gaussian/05-mera-2d-4x4-uhf.py
@@ -46,7 +46,7 @@
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]]
- ).flatten()
+ ).ravel()

dm0b = ~dm0a & 1
mf.conv_tol = 1E-14