Basis expansion method with expand #24

Merged: 23 commits (May 16, 2024)
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,7 +1,7 @@
name = "ITensorTDVP"
uuid = "25707e16-a4db-4a07-99d9-4d67b7af0342"
authors = ["Matthew Fishman <mfishman@flatironinstitute.org> and contributors"]
version = "0.4.0"
version = "0.4.1"

[deps]
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
17 changes: 17 additions & 0 deletions README.md
@@ -22,6 +22,23 @@ However, as noted above we now recommend installing and loading `ITensorMPS` ins

## News

### ITensorTDVP.jl v0.4.1 release notes

#### Breaking changes

#### New features

- A new (experimental) `expand` function has been introduced for performing global Krylov expansion based on [arXiv:2005.06104](https://arxiv.org/abs/2005.06104), which can improve the accuracy of TDVP evolution in certain cases. See the `expand` docstrings for more details:
```julia
julia> using ITensorTDVP

julia> ?

help?> expand
# ...
```
For now, `expand` exposes only a few customization options while we gain more experience with the right balance between the efficacy of the expansion and its performance; the default values and keyword arguments are subject to change as we learn how best to use the method.
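For example, a global Krylov expansion can be inserted before a TDVP evolution roughly as follows. This is a minimal sketch modeled on the tests added in this release; the Heisenberg Hamiltonian, time step, and truncation parameters are illustrative placeholders:
```julia
using ITensors.ITensorMPS: MPO, MPS, OpSum, siteinds
using ITensorTDVP: expand, tdvp

n = 10
s = siteinds("S=1/2", n)
H = let
  opsum = OpSum()
  for j in 1:(n - 1)
    opsum += 0.5, "S+", j, "S-", j + 1
    opsum += 0.5, "S-", j, "S+", j + 1
    opsum += "Sz", j, "Sz", j + 1
  end
  MPO(opsum, s)
end
state = MPS(s, j -> isodd(j) ? "↑" : "↓")
# Expand the MPS basis with a global Krylov step, then take a few
# (imaginary-time) TDVP steps in the expanded basis.
state = expand(state, H; alg="global_krylov")
state = tdvp(H, -0.1, state; nsteps=4, cutoff=1e-8)
```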

### ITensorTDVP.jl v0.4 release notes

#### Breaking changes
3 changes: 2 additions & 1 deletion src/ITensorTDVP.jl
@@ -1,5 +1,5 @@
module ITensorTDVP
export TimeDependentSum, dmrg_x, linsolve, tdvp, to_vec
export TimeDependentSum, dmrg_x, expand, linsolve, tdvp, to_vec
include("ITensorsExtensions.jl")
using .ITensorsExtensions: to_vec
include("applyexp.jl")
@@ -17,6 +17,7 @@ include("contract.jl")
include("reducedconstantterm.jl")
include("reducedlinearproblem.jl")
include("linsolve.jl")
include("expand.jl")
using PackageExtensionCompat: @require_extensions
function __init__()
@require_extensions
159 changes: 159 additions & 0 deletions src/expand.jl
@@ -0,0 +1,159 @@
using ITensors:
ITensors,
Algorithm,
Index,
ITensor,
@Algorithm_str,
δ,
commonind,
commoninds,
dag,
denseblocks,
directsum,
hasqns,
prime,
scalartype,
uniqueinds
using ITensors.ITensorMPS: MPO, MPS, apply, dim, linkind, linkinds, maxlinkdim, orthogonalize, siteinds
using LinearAlgebra: eigen, norm, normalize, svd, tr

# Possible improvements:
# - Allow a maxdim argument to be passed to `expand`.
# - Current behavior is letting bond dimension get too
# big when used in imaginary time evolution.
# - Consider switching the default to variational/fit apply
# when building Krylov vectors.
# - Use (1-tau*operator)|state> to generate Krylov vectors
# instead of operator|state>. Is that needed?

function expand(state, reference; alg, kwargs...)
return expand(Algorithm(alg), state, reference; kwargs...)
end

function expand_cutoff_doctring()
return """
The cutoff is used to control the truncation of the expanded
basis and defaults to half the precision of the scalar type
of the input state, i.e. ~1e-8 for `Float64`.
"""
end
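# As a concrete example of the default cutoff described above:
# √(eps(Float64)) ≈ 1.5e-8 and √(eps(Float32)) ≈ 3.5e-4, i.e. a
# truncation at roughly half the working precision of the state.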

function expand_warning_doctring()
return """
!!! warning
Users are not given many customization options just yet as we
gain more experience on the right balance between efficacy of the
expansion and performance in various scenarios, and default values
and keyword arguments are subject to change as we learn more about
how to best use the method.
"""
end

function expand_citation_docstring()
return """
[^global_expansion]: Time Dependent Variational Principle with Ancillary Krylov Subspace. Mingru Yang, Steven R. White, [arXiv:2005.06104](https://arxiv.org/abs/2005.06104)
"""
end

"""
expand(state::MPS, references::Vector{MPS}; alg="orthogonalize", cutoff)

Given an MPS `state` and a collection of MPS `references`,
returns an MPS which is equal to `state`
(has fidelity 1 with `state`) but whose MPS basis
is expanded to contain a portion of the basis of
the `references` that is orthogonal to the MPS basis of `state`.
See [^global_expansion] for more details.

$(expand_cutoff_doctring())

$(expand_warning_doctring())

$(expand_citation_docstring())
"""
function expand(
::Algorithm"orthogonalize",
state::MPS,
references::Vector{MPS};
cutoff=(√(eps(real(scalartype(state))))),
)
n = length(state)
state = orthogonalize(state, n)
references = map(reference -> orthogonalize(reference, n), references)
s = siteinds(state)
for j in reverse(2:n)
# SVD state[j] to compute basisⱼ
linds = [s[j - 1]; linkinds(state, j - 1)]
_, λⱼ, basisⱼ = svd(state[j], linds; righttags="bψ_$j,Link")
rinds = uniqueinds(basisⱼ, λⱼ)
# Make projectorⱼ onto the subspace orthogonal to the right basis of `state` at site j
idⱼ = prod(r -> denseblocks(δ(scalartype(state), r', dag(r))), rinds)
projectorⱼ = idⱼ - prime(basisⱼ, rinds) * dag(basisⱼ)
# Sum reference density matrices
ρⱼ = sum(reference -> prime(reference[j], rinds) * dag(reference[j]), references)
# TODO: Fix bug that `tr` isn't preserving the element type.
ρⱼ /= scalartype(state)(tr(ρⱼ))
# Apply projectorⱼ
ρⱼ_projected = apply(apply(projectorⱼ, ρⱼ), projectorⱼ)
expanded_basisⱼ = basisⱼ
if norm(ρⱼ_projected) > 10^3 * eps(real(scalartype(state)))
# Diagonalize projected density matrix ρⱼ_projected
# to compute reference_basisⱼ, which spans part of right basis
# of references which is orthogonal to right basis of state
dⱼ, reference_basisⱼ = eigen(
ρⱼ_projected; cutoff, ishermitian=true, righttags="bϕ_$j,Link"
)
state_indⱼ = only(commoninds(basisⱼ, λⱼ))
reference_indⱼ = only(commoninds(reference_basisⱼ, dⱼ))
expanded_basisⱼ, expanded_indⱼ = directsum(
basisⱼ => state_indⱼ, reference_basisⱼ => reference_indⱼ
)
end
# Shift ortho center one site left using dag(expanded_basisⱼ)
# and replace tensor at site j with expanded_basisⱼ
state[j - 1] = state[j - 1] * (state[j] * dag(expanded_basisⱼ))
state[j] = expanded_basisⱼ
for reference in references
reference[j - 1] = reference[j - 1] * (reference[j] * dag(expanded_basisⱼ))
reference[j] = expanded_basisⱼ
end
end
return state
end

"""
expand(state::MPS, operator::MPO; alg="global_krylov", krylovdim=2, cutoff)

Given an MPS `state` and an MPO `operator`,
returns an MPS which is equal to `state`
(has fidelity 1 with `state`) but whose MPS basis
is expanded to contain a portion of the basis of
the Krylov space built by repeated applications of
`operator` to `state` that is orthogonal
to the MPS basis of `state`.
The `operator` is applied to `state` `krylovdim` times;
the default of 2 should give a good balance between
reliability and performance.
See [^global_expansion] for more details.

$(expand_cutoff_doctring())

$(expand_warning_doctring())

$(expand_citation_docstring())
"""
function expand(
::Algorithm"global_krylov",
state::MPS,
operator::MPO;
krylovdim=2,
cutoff=(√(eps(real(scalartype(state))))),
apply_kwargs=(; maxdim=maxlinkdim(state) + 1),
)
# TODO: Try replacing this logic with `Base.accumulate`.
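# Build the (approximately normalized) Krylov vectors operator^k |state⟩
# for k = 1, ..., krylovdim; they act as the reference states for the
# basis expansion performed by the `alg="orthogonalize"` method above.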
references = Vector{MPS}(undef, krylovdim)
for k in 1:krylovdim
previous_reference = get(references, k - 1, state)
references[k] = normalize(apply(operator, previous_reference; apply_kwargs...))
end
return expand(state, references; alg="orthogonalize", cutoff)
end
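For reference, here is a minimal usage sketch of the `alg="orthogonalize"` variant defined above, modeled on the tests added in this pull request; the chain length, initial product states, and bond dimensions are arbitrary:
```julia
using ITensors.ITensorMPS: inner, randomMPS, siteinds
using ITensorTDVP: expand

s = siteinds("S=1/2", 6)
state = randomMPS(s, j -> isodd(j) ? "↑" : "↓"; linkdims=4)
reference = randomMPS(s, j -> isodd(j) ? "↑" : "↓"; linkdims=2)
# The expanded state represents the same state as `state`, but its link
# (bond) basis also spans part of the basis of `reference` that is
# orthogonal to the basis of `state`.
state_expanded = expand(state, [reference]; alg="orthogonalize")
@assert inner(state_expanded, state) ≈ inner(state, state)
@assert inner(state_expanded, reference) ≈ inner(state, reference)
```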
10 changes: 9 additions & 1 deletion src/sweep_update.jl
@@ -1,6 +1,14 @@
using ITensors: ITensors, uniqueinds
using ITensors.ITensorMPS:
ITensorMPS, MPS, isortho, orthocenter, orthogonalize!, position!, replacebond!, set_nsite!
ITensorMPS,
MPS,
isortho,
noiseterm,
orthocenter,
orthogonalize!,
position!,
replacebond!,
set_nsite!
using LinearAlgebra: norm, normalize!, svd
using Printf: @printf

1 change: 1 addition & 0 deletions test/Project.toml
@@ -8,5 +8,6 @@ Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
94 changes: 94 additions & 0 deletions test/test_expand.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
@eval module $(gensym())
using ITensors: scalartype
using ITensors.ITensorMPS: OpSum, MPO, MPS, inner, linkdims, maxlinkdim, randomMPS, siteinds
using ITensorTDVP: dmrg, expand, tdvp
using LinearAlgebra: normalize
using StableRNGs: StableRNG
using Test: @test, @testset
const elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
@testset "expand (eltype=$elt)" for elt in elts
@testset "expand (alg=\"orthogonalize\", conserve_qns=$conserve_qns, eltype=$elt)" for conserve_qns in
(
false, true
)
n = 6
s = siteinds("S=1/2", n; conserve_qns)
rng = StableRNG(1234)
state = randomMPS(rng, elt, s, j -> isodd(j) ? "↑" : "↓"; linkdims=4)
reference = randomMPS(rng, elt, s, j -> isodd(j) ? "↑" : "↓"; linkdims=2)
state_expanded = expand(state, [reference]; alg="orthogonalize")
@test scalartype(state_expanded) === elt
@test inner(state_expanded, state) ≈ inner(state, state)
@test inner(state_expanded, reference) ≈ inner(state, reference)
end
@testset "expand (alg=\"global_krylov\", conserve_qns=$conserve_qns, eltype=$elt)" for conserve_qns in
(
false, true
)
n = 10
s = siteinds("S=1/2", n; conserve_qns)
opsum = OpSum()
for j in 1:(n - 1)
opsum += 0.5, "S+", j, "S-", j + 1
opsum += 0.5, "S-", j, "S+", j + 1
opsum += "Sz", j, "Sz", j + 1
end
operator = MPO(elt, opsum, s)
state = MPS(elt, s, j -> isodd(j) ? "↑" : "↓")
state_expanded = expand(state, operator; alg="global_krylov")
@test scalartype(state_expanded) === elt
@test maxlinkdim(state_expanded) > 1
@test inner(state_expanded, state) ≈ inner(state, state)
end
@testset "Decoupled ladder (alg=\"global_krylov\", eltype=$elt)" begin
nx = 10
ny = 2
n = nx * ny
s = siteinds("S=1/2", n)
opsum = OpSum()
for j in 1:2:(n - 2)
opsum += 1 / 2, "S+", j, "S-", j + 2
opsum += 1 / 2, "S-", j, "S+", j + 2
opsum += "Sz", j, "Sz", j + 2
end
for j in 2:2:(n - 2)
opsum += 1 / 2, "S+", j, "S-", j + 2
opsum += 1 / 2, "S-", j, "S+", j + 2
opsum += "Sz", j, "Sz", j + 2
end
operator = MPO(elt, opsum, s)
rng = StableRNG(1234)
init = randomMPS(rng, elt, s; linkdims=30)
reference_energy, reference_state = dmrg(
operator,
init;
nsweeps=15,
maxdim=[10, 10, 20, 20, 40, 80, 100],
cutoff=(√(eps(real(elt)))),
noise=(√(eps(real(elt)))),
)
rng = StableRNG(1234)
state = randomMPS(rng, elt, s)
nexpansions = 10
tau = elt(0.5)
for step in 1:nexpansions
# TODO: Use `fourthroot`/`∜` in Julia 1.10 and above.
state = expand(
state, operator; alg="global_krylov", krylovdim=3, cutoff=eps(real(elt))^(1//4)
)
state = tdvp(
operator,
-4tau,
state;
nsteps=4,
cutoff=1e-5,
updater_kwargs=(; tol=1e-3, krylovdim=5),
)
state = normalize(state)
end
@test scalartype(state) === elt
# TODO: Use `fourthroot`/`∜` in Julia 1.10 and above.
@test inner(state', operator, state) ≈ reference_energy rtol = 5 * eps(real(elt))^(1//4)
end
end
end
2 changes: 1 addition & 1 deletion test/test_exports.jl
@@ -4,7 +4,7 @@ using Test: @test, @testset
@testset "Test exports" begin
@test issetequal(
names(ITensorTDVP),
[:ITensorTDVP, :TimeDependentSum, :dmrg_x, :linsolve, :tdvp, :to_vec],
[:ITensorTDVP, :TimeDependentSum, :dmrg_x, :expand, :linsolve, :tdvp, :to_vec],
)
end
end