Skip to content

Commit

Permalink
Merge branch 'master' into bMPS
Browse files Browse the repository at this point in the history
  • Loading branch information
lpawela committed Oct 28, 2020
2 parents 1bc447b + a3a39e8 commit bbd3465
Show file tree
Hide file tree
Showing 17 changed files with 463 additions and 112 deletions.
11 changes: 11 additions & 0 deletions .github/workflows/TagBot.yml
@@ -0,0 +1,11 @@
# GitHub Actions workflow: runs JuliaRegistries/TagBot on a schedule so that
# package versions registered in the Julia General registry automatically get
# matching git tags / GitHub releases on this repository.
# NOTE(review): the scrape had flattened the YAML indentation; restored to the
# canonical TagBot template layout.
name: TagBot
on:
  schedule:
    - cron: 0 * * * *   # every hour, on the hour
jobs:
  TagBot:
    runs-on: ubuntu-latest
    steps:
      - uses: JuliaRegistries/TagBot@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
7 changes: 7 additions & 0 deletions .gitignore
@@ -0,0 +1,7 @@
# Coverage reports produced by Julia's --code-coverage
*.cov
# Editor and notebook working directories
.vscode
.ipynb_checkpoints
# Numerical data dumps (NumPy archives / arrays)
*.npz
*.npy
# Generated figures
*.pdf
# Experiment output directory
results
6 changes: 5 additions & 1 deletion src/SpinGlassPEPS.jl
Expand Up @@ -22,9 +22,13 @@ module SpinGlassPEPS
const CuArray = CUDA.CuArray
const CuVector = CUDA.CuVector
const CuMatrix = CUDA.CuMatrix
const CuSVD = CUDA.CUSOLVER.CuSVD
const CuQR = CUDA.CUSOLVER.CuQR
# scalar indexing is fine before 0.2
# CUDA.allowscalar(false)
include("cuda.jl")
include("cuda/base.jl")
include("cuda/contractions.jl")
include("cuda/compressions.jl")
end
end
end
Expand Down
19 changes: 10 additions & 9 deletions src/compressions.jl
Expand Up @@ -8,7 +8,7 @@ function LinearAlgebra.qr(M::AbstractMatrix, Dcut::Int)
end

function rq(M::AbstractMatrix, Dcut::Int)
fact = pqrfact(:c, conj(M), rank=Dcut)
fact = pqrfact(:c, conj.(M), rank=Dcut)
Q = fact[:Q]
R = fact[:R]
return _qr_fix!(Q, R)'
Expand All @@ -22,18 +22,18 @@ function _qr_fix!(Q::AbstractMatrix, R::AbstractMatrix)
return transpose(ph) .* q
end

"""
    canonise!(ψ::AbstractMPS)

Bring `ψ` into canonical form in place: a right-canonising sweep followed
by a left-canonising sweep (both SVD-based, see the `Val` dispatchers).
"""
function canonise!(ψ::AbstractMPS)
    # NOTE(review): the scrape dropped the `(ψ` token from the signature;
    # restored here. The argument must be named for the body to compile.
    canonise!(ψ, :right)
    canonise!(ψ, :left)
end

# Symbol → Val dispatch so direction can be chosen at runtime while the
# actual sweep method is selected by multiple dispatch.
# (Restored the `(ψ` tokens stripped by the scrape; kept the post-commit
# AbstractMPS signatures, which supersede the old MPS-only ones.)
canonise!(ψ::AbstractMPS, s::Symbol) = canonise!(ψ, Val(s))
# :right canonical form is produced by sweeping leftwards, and vice versa.
canonise!(ψ::AbstractMPS, ::Val{:right}) = _left_sweep_SVD!(ψ)
canonise!(ψ::AbstractMPS, ::Val{:left}) = _right_sweep_SVD!(ψ)

# Truncating variants: same Symbol → Val dispatch as `canonise!`, but the
# SVD sweeps keep at most `Dcut` singular values per bond.
# (Restored the `(ψ` tokens stripped by the scrape; kept the post-commit
# AbstractMPS signatures.)
truncate!(ψ::AbstractMPS, s::Symbol, Dcut::Int) = truncate!(ψ, Val(s), Dcut)
truncate!(ψ::AbstractMPS, ::Val{:right}, Dcut::Int) = _left_sweep_SVD!(ψ, Dcut)
truncate!(ψ::AbstractMPS, ::Val{:left}, Dcut::Int) = _right_sweep_SVD!(ψ, Dcut)

function _right_sweep_SVD!::MPS, Dcut::Int=typemax(Int))
Σ = V = ones(eltype(ψ), 1, 1)
Expand Down Expand Up @@ -77,7 +77,8 @@ function _left_sweep_SVD!(ψ::MPS, Dcut::Int=typemax(Int))
end
end

function compress::MPS, Dcut::Int, tol::Number, max_sweeps::Int=4)

function compress::AbstractMPS, Dcut::Int, tol::Number, max_sweeps::Int=4)

# Initial guess - truncated ψ
ϕ = copy(ψ)
Expand Down
22 changes: 11 additions & 11 deletions src/contractions.jl
Expand Up @@ -31,7 +31,7 @@ function left_env(ϕ::MPS, ψ::MPS)

for i 1:l
M = ψ[i]
= conj(ϕ[i])
= conj.(ϕ[i])

C = L[i]
@tensor C[x, y] := M̃[β, σ, x] * C[β, α] * M[α, σ, y] order = (α, β, σ)
Expand All @@ -50,7 +50,7 @@ function right_env(ϕ::MPS, ψ::MPS)

for i L:-1:1
M = ψ[i]
= conj(ϕ[i])
= conj.(ϕ[i])

D = R[i+1]
@tensor D[x, y] := M[x, σ, α] * D[α, β] * M̃[y, σ, β] order = (β, α, σ)
Expand All @@ -59,25 +59,25 @@ function right_env(ϕ::MPS, ψ::MPS)
return R
end

# 2-norm of an MPS: √⟨ψ|ψ⟩. `abs` guards against a tiny negative/complex
# residue from floating-point contraction before the square root.
# (Restored the `(ψ` token stripped by the scrape; kept the post-commit
# AbstractMPS signature.)
LinearAlgebra.norm(ψ::AbstractMPS) = sqrt(abs(dot(ψ, ψ)))

function LinearAlgebra.dot::MPS, O::Vector{T}, ψ::MPS) where {T <: AbstractMatrix}
S = promote_type(eltype(ψ), eltype(ϕ), eltype(O[1]))
C = ones(S, 1, 1)

for i 1:length(ψ)
M = ψ[i]
= conj(ϕ[i])
= conj.(ϕ[i])
Mat = O[i]
@tensor C[x, y] := M̃[β, σ, x] * Mat[σ, η] * C[β, α] * M[α, η, y] order = (α, η, β, σ)
end
return C[1]
end

function LinearAlgebra.dot(O::MPO, ψ::MPS)
function LinearAlgebra.dot(O::AbstractMPO, ψ::T) where {T <: AbstractMPS}
L = length(ψ)
T = promote_type(eltype(ψ), eltype(ϕ))
ϕ = MPS(T, L)
S = promote_type(eltype(ψ), eltype(O))
ϕ = T.name.wrapper(S, L)

for i in 1:L
W = O[i]
Expand All @@ -89,7 +89,7 @@ function LinearAlgebra.dot(O::MPO, ψ::MPS)
return ϕ
end

function dot!(O::MPO, ψ::MPS)
function dot!(O::AbstractMPO, ψ::AbstractMPS)
L = length(ψ)
for i in 1:L
W = O[i]
Expand All @@ -100,9 +100,9 @@ function dot!(O::MPO, ψ::MPS)
end
end

# Operator application sugar: O * ψ ≡ dot(O, ψ).
# Dropped the redundant `return` keyword (unidiomatic in a one-line
# definition) and kept only the post-commit AbstractMPO/AbstractMPS method.
Base.:(*)(O::AbstractMPO, ψ::AbstractMPS) = dot(O, ψ)

function LinearAlgebra.dot(O1::MPO, O2::MPO)
function LinearAlgebra.dot(O1::AbstractMPO, O2::AbstractMPO)
L = length(O1)
T = promote_type(eltype(ψ), eltype(ϕ))
O = MPO(T, L)
Expand All @@ -117,4 +117,4 @@ function LinearAlgebra.dot(O1::MPO, O2::MPO)
return O
end

# Operator composition sugar: O1 * O2 ≡ dot(O1, O2).
# Kept only the post-commit AbstractMPO method (the scraped diff showed the
# superseded MPO-only line alongside it).
Base.:(*)(O1::AbstractMPO, O2::AbstractMPO) = dot(O1, O2)
55 changes: 0 additions & 55 deletions src/cuda.jl

This file was deleted.

66 changes: 66 additions & 0 deletions src/cuda/base.jl
@@ -0,0 +1,66 @@
# Metaprogramming loop: generates the GPU (CUDA) counterparts of the MPS and
# MPO containers. MPO site tensors are rank-4, MPS site tensors rank-3, so
# the pair (T, N) drives both the generated type name and the array rank.
for (T, N) in ((:MPO, 4), (:MPS, 3))
CuT = Symbol(:Cu, T)        # generated type name, e.g. :CuMPS
AT = Symbol(:Abstract, T)   # supertype name, e.g. :AbstractMPS (declared elsewhere)
@eval begin
export $CuT

# Tensor train living on the GPU: one CuArray per site.
struct $CuT{T <: Number} <: $AT{T}
tensors::Vector{CuArray{T, $N}}
end

# Allocate an L-site container with undefined site tensors.
$CuT(::Type{T}, L::Int) where {T} = $CuT(Vector{CuArray{T, $N}}(undef, L))
# Default element type is Float32 — presumably chosen as the natural GPU
# precision; confirm against the CPU containers' default.
$CuT(L::Int) = $CuT(Float32, L)

# Shallow copy of the tensor list (site tensors themselves are shared).
Base.copy(a::$CuT) = $CuT(copy(a.tensors))
end
end

"""
    CUDA.randn(::Type{CuMPS{T}}, L::Int, D::Int, d::Int) where {T}

Return a random `L`-site `CuMPS{T}` with bond dimension `D` and physical
dimension `d`; boundary tensors have bond dimension 1 on the open side.
"""
function CUDA.randn(::Type{CuMPS{T}}, L::Int, D::Int, d::Int) where {T}
    # BUG FIX: was `CuMPS(L)`, which allocates Float32 storage regardless of
    # the requested T, silently down-converting the randn(T, ...) tensors.
    ψ = CuMPS(T, L)
    ψ[1] = CUDA.randn(T, 1, d, D)
    for i ∈ 2:(L-1)   # `∈` restored — it was stripped by the scrape
        ψ[i] = CUDA.randn(T, D, d, D)
    end
    ψ[end] = CUDA.randn(T, D, d, 1)
    return ψ
end

# Random GPU MPO: a random MPS with squared physical dimension d^2,
# reinterpreted site-by-site as an operator via the CuMPO conversion.
CUDA.randn(::Type{CuMPO{T}}, L::Int, D::Int, d::Int) where {T} =
    CuMPO(CUDA.randn(CuMPS{T}, L, D, d^2))

"""
    CuMPS(vec::CuVector{<:Number})

Build an `L`-site `CuMPS` from a device vector, one entry per site; each site
tensor has size (1, 1, 1). Presumably encodes a product state — confirm
against the CPU `MPS(::Vector)` constructor.
"""
function CuMPS(vec::CuVector{<:Number})
    L = length(vec)
    # Match the vector's element type (was `CuMPS(L)` → always Float32).
    ψ = CuMPS(eltype(vec), L)
    for i ∈ 1:L   # `∈` restored — stripped by the scrape
        # BUG FIX: was `reshape(copy(vec[i]), 1, :, 1)` — `vec[i]` is a
        # scalar, which cannot be reshaped, and scalar-indexing a CuVector is
        # disallowed. A 1-element slice stays on the device and reshapes.
        ψ[i] = reshape(vec[i:i], 1, :, 1)
    end
    # BUG FIX: the original ended on the `for` loop, so the function
    # returned `nothing` instead of the constructed MPS.
    return ψ
end

"""
    CuMPO(ψ::CuMPS)

Reinterpret an MPS whose physical dimension is a perfect square `d^2` as an
MPO with physical dimensions `d × d`, splitting each site's physical leg.
"""
function CuMPO(ψ::CuMPS)   # `(ψ` restored — the scrape stripped it from the header
    _verify_square(ψ)      # errors unless every physical dimension is a perfect square
    L = length(ψ)
    O = CuMPO(eltype(ψ), L)

    for i ∈ 1:L   # `∈` restored — stripped by the scrape
        A = ψ[i]
        d = isqrt(size(A, 2))   # side length of the split physical leg

        # Split the fused physical index (σ, η) into separate in/out legs.
        @cast W[x, σ, y, η] |= A[x, (σ, η), y] (σ:d)
        O[i] = W
    end
    return O
end

"""
    CuMPS(O::CuMPO)

Inverse of `CuMPO(::CuMPS)`: fuse each site's two physical legs (σ, η) into
a single physical index, yielding an MPS with physical dimension `d^2`.
"""
function CuMPS(O::CuMPO)
    L = length(O)
    ψ = CuMPS(eltype(O), L)

    for i ∈ 1:L   # `∈` restored — stripped by the scrape
        W = O[i]
        # Fuse the operator's in/out physical legs into one MPS leg.
        @cast A[x, (σ, η), y] := W[x, σ, y, η]
        ψ[i] = A
    end
    return ψ
end

0 comments on commit bbd3465

Please sign in to comment.