From 42bbdfaddb0df12df9ee1f914d508164aacc6528 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Thu, 29 Aug 2019 19:00:08 -0400 Subject: [PATCH 01/27] WIP: new Tensor design --- src/ITensors.jl | 19 +- src/index.jl | 5 + src/indexset.jl | 38 +- src/itensor.jl | 260 ++++++----- src/sets.jl | 10 + src/storage/dense.jl | 359 --------------- src/storage/tensorstorage.jl | 67 --- src/{storage => tensor}/combiner.jl | 0 src/{storage => tensor}/contract.jl | 0 src/tensor/dense.jl | 426 ++++++++++++++++++ src/{storage => tensor}/diag.jl | 0 src/{storage => tensor}/svd.jl | 0 src/tensor/tensor.jl | 59 +++ src/tensor/tensorstorage.jl | 67 +++ ...{test_itensor.jl => test_itensor_dense.jl} | 13 +- ...st_diagITensor.jl => test_itensor_diag.jl} | 0 16 files changed, 741 insertions(+), 582 deletions(-) create mode 100644 src/sets.jl delete mode 100644 src/storage/dense.jl delete mode 100644 src/storage/tensorstorage.jl rename src/{storage => tensor}/combiner.jl (100%) rename src/{storage => tensor}/contract.jl (100%) create mode 100644 src/tensor/dense.jl rename src/{storage => tensor}/diag.jl (100%) rename src/{storage => tensor}/svd.jl (100%) create mode 100644 src/tensor/tensor.jl create mode 100644 src/tensor/tensorstorage.jl rename test/{test_itensor.jl => test_itensor_dense.jl} (96%) rename test/{test_diagITensor.jl => test_itensor_diag.jl} (100%) diff --git a/src/ITensors.jl b/src/ITensors.jl index 49d6cba42b..a11e360a4b 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -52,18 +52,25 @@ import Base.adjoint, const warnTensorOrder = 10 +##################################### +# Helper functionality +# + +include("sets.jl") + ##################################### include("smallstring.jl") include("tagset.jl") include("index.jl") include("indexset.jl") -include("storage/tensorstorage.jl") -include("storage/dense.jl") -include("storage/diag.jl") -include("storage/combiner.jl") -include("storage/contract.jl") -include("storage/svd.jl") +include("tensor/tensor.jl") +include("tensor/tensorstorage.jl") +include("tensor/dense.jl") +include("tensor/diag.jl") +include("tensor/combiner.jl") +include("tensor/contract.jl") +include("tensor/svd.jl") include("itensor.jl") include("decomp.jl") include("iterativesolvers.jl") diff --git a/src/index.jl b/src/index.jl index 4cbef20ca6..b3242097f8 100644 --- a/src/index.jl +++ b/src/index.jl @@ -81,6 +81,11 @@ dir(i::Index) = i.dir tags(i::Index) = i.tags plev(i::Index) = plev(tags(i)) +# Overload for use in AbstractArray interface for Tensor +#Base.length(i::Index) = dim(i) +#Base.UnitRange(i::Index) = 1:dim(i) +#Base.checkindex(::Type{Bool}, i::Index, val::Int64) = (val ≤ dim(i) && val ≥ 1) + """ ==(i1::Index, i1::Index) diff --git a/src/indexset.jl b/src/indexset.jl index f824e4fcbe..11f72f0b93 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -16,24 +16,31 @@ export IndexSet, uniqueindex, dims, minDim, - maxDim + maxDim, + push, + permute -struct IndexSet - inds::Vector{Index} - IndexSet(inds::Vector{Index}) = new(inds) +struct IndexSet{N} + inds::MVector{N,Index} + IndexSet(inds::MVector{N,Index}) where {N} = new{N}(inds) + IndexSet(inds::NTuple{N,Index}) where {N} = new{N}(inds) end inds(is::IndexSet) = is.inds +function permute(is::IndexSet{N},perm) where {N} + indsp = ntuple(i->is[perm[i]], Val(N)) + return IndexSet(indsp) +end + # Empty constructor -IndexSet() = IndexSet(Index[]) +IndexSet() = IndexSet(()) # Construct of some size -IndexSet(N::Integer) = IndexSet(Vector{Index}(undef,N)) +IndexSet(::Val{N}) where {N} = 
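+# (An illustrative aside, not part of the original API: with the static-size
+# design the order N is a type parameter, so constructors like the one here
+# can build index tuples with ntuple/Val and avoid heap allocation, e.g.
+#   is = IndexSet(Index(2),Index(3))   # an IndexSet{2}
+#   permute(is,(2,1))                  # still an IndexSet{2}, order swapped
+# )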
IndexSet(ntuple(_->Index(),Val(N))) # Construct from various sets of indices -IndexSet(inds::Index...) = IndexSet(Index[inds...]) -IndexSet(inds::NTuple{N,Index}) where {N} = IndexSet(inds...) +IndexSet(inds::Vararg{Index,N}) where {N} = IndexSet(inds) # Construct from various sets of IndexSets IndexSet(inds::IndexSet) = inds @@ -57,7 +64,7 @@ setindex!(is::IndexSet,i::Index,n::Integer) = setindex!(is.inds,i,n) length(is::IndexSet) = length(is.inds) order(is::IndexSet) = length(is) copy(is::IndexSet) = IndexSet(copy(is.inds)) -dims(is::IndexSet) = Tuple(dim(i) for i ∈ is) +dims(is::IndexSet{N}) where {N} = ntuple(i->dim(is[i]),Val(N)) dim(is::IndexSet) = prod(dim.(is)) dim(is::IndexSet,pos::Integer) = dim(is[pos]) @@ -74,9 +81,10 @@ Base.stride(is::IndexSet,k::Integer) = strides(is)[k] dag(is::IndexSet) = IndexSet(dag.(is.inds)) # Allow iteration -iterate(is::IndexSet,state::Int=1) = iterate(is.inds,state) +Base.iterate(is::IndexSet{N},state::Int=1) where {N} = state > N ? nothing : (is[state], state+1) -push!(is::IndexSet,i::Index) = push!(is.inds,i) +#push!(is::IndexSet,i::Index) = push!(is.inds,i) +StaticArrays.push(is::IndexSet,i::Index) = push(is.inds,i) """ minDim(is::IndexSet) @@ -434,7 +442,7 @@ function swaptags!(is::IndexSet, end swaptags(is, vargs...) = swaptags!(copy(is), vargs...) -function calculate_permutation(set1, set2) +function getperm(set1, set2) l1 = length(set1) l2 = length(set2) l1==l2 || throw(DimensionMismatch("Mismatched input sizes in calcPerm: l1=$l1, l2=$l2")) @@ -451,9 +459,9 @@ function calculate_permutation(set1, set2) return p end -function compute_contraction_labels(Ai::IndexSet,Bi::IndexSet) - rA = order(Ai) - rB = order(Bi) +function compute_contraction_labels(Ai,Bi) + rA = length(Ai) + rB = length(Bi) Aind = zeros(Int,rA) Bind = zeros(Int,rB) diff --git a/src/itensor.jl b/src/itensor.jl index 293acdc3ce..a6f906ecd0 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -7,6 +7,7 @@ export ITensor, replaceindex!, inds, isNull, + scale!, normalize!, multSiteOps, order, @@ -18,22 +19,26 @@ export ITensor, dense -mutable struct ITensor - inds::IndexSet +mutable struct ITensor{N} + inds::IndexSet{N} store::TensorStorage #TODO: check that the storage is consistent with the #total dimension of the indices (possibly only in debug mode); - ITensor(is::IndexSet,st::T) where T = new(is,st) + ITensor(is::IndexSet{N},st) where {N} = new{N}(is,st) end # # Dense ITensor constructors # +ITensor(T::Tensor) = ITensor(inds(T),store(T)) + ITensor() = ITensor(IndexSet(),Dense{Nothing}()) ITensor(is::IndexSet) = ITensor(Float64,is...) ITensor(inds::Index...) = ITensor(IndexSet(inds...)) +Tensor(A::ITensor) = Tensor(store(A),inds(A)) + function ITensor(::Type{T}, inds::IndexSet) where {T<:Number} return ITensor(inds,Dense{float(T)}(zero(float(T)),dim(inds))) @@ -183,14 +188,16 @@ dense(T::ITensor) Make a copy of the ITensor where the storage is the dense version. For example, an ITensor with Diag storage will become Dense storage. """ -dense(T::ITensor) = ITensor(inds(T),storage_dense(store(T),inds(T))) +function dense(T::ITensor) + ITensor(dense(Tensor(store(T),inds(T)))) +end """ complex(T::ITensor) Convert to the complex version of the storage. """ -Base.complex(T::ITensor) = ITensor(inds(T),storage_complex(store(T))) +Base.complex(T::ITensor) = ITensor(complex(Tensor(store(T),inds(T)))) inds(T::ITensor) = T.inds store(T::ITensor) = T.store @@ -222,82 +229,77 @@ dim(T::ITensor) = dim(inds(T)) Tuple containing `size(A,d) == dim(inds(A)[d]) for d in 1:ndims(A)`. 
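
For example (an illustrative sketch):

```
i = Index(2); j = Index(3)
A = randomITensor(i,j)
dims(A) == (2,3)
```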
""" dims(T::ITensor) = dims(inds(T)) -size(A::ITensor) = dims(inds(A)) -size(A::ITensor, d::Int) = d in 1:ndims(A) ? dim(inds(A)[d]) : +Base.size(A::ITensor) = dims(inds(A)) +Base.size(A::ITensor{N}, d::Int) where {N} = d in 1:N ? dim(inds(A)[d]) : d>0 ? 1 : error("arraysize: dimension out of range") -isNull(T::ITensor) = (store(T) isa Dense{Nothing}) +isNull(T::ITensor) = (eltype(T) === Nothing) -copy(T::ITensor) = ITensor(copy(inds(T)),copy(store(T))) +Base.copy(T::ITensor) = ITensor(copy(Tensor(T))) -Array(T::ITensor) = storage_convert(Array,store(T),inds(T)) +Base.Array(T::ITensor) = Array(Tensor(T)) -Array(T::ITensor,ninds::Index...) = storage_convert(Array,store(T),inds(T),IndexSet(ninds)) +Base.Matrix(T::ITensor{N}) where {N} = (N==2 ? Array(Tensor(T)) : throw(DimensionMismatch("ITensor must be order 2 to convert to a Matrix"))) -function Matrix(A::ITensor,i1::Index,i2::Index) - if ndims(A) != 2 - throw(DimensionMismatch("Matrix() expected a 2-index ITensor")) - end - return Array(A,i1,i2) -end +Base.Vector(T::ITensor{N}) where {N} = (N==1 ? Array(Tensor(T)) : throw(DimensionMismatch("ITensor must be order 1 to convert to a Vector"))) -Matrix(A::ITensor) = Matrix(A,inds(A)...) +scalar(T::ITensor) = T[] -function Vector(A::ITensor) - if ndims(A) != 1 - throw(DimensionMismatch("Vector() expected a 1-index ITensor")) - end - return Array(A,inds(A)...) +#Array(T::ITensor) = storage_convert(Array,store(T),inds(T)) + +function Base.Array(T::ITensor{N},is::Vararg{Index,N}) where {N} + perm = getperm(inds(T),is) + return Array(permutedims(Tensor(T),perm)) end -function getindex(T::ITensor,vals::Int...) - if order(T) ≠ length(vals) - error("In getindex(::ITensor,::Int..), number of \\ - values provided ($(length(vals))) must equal \\ - order of ITensor ($(order(T)))") - end - storage_getindex(store(T),inds(T),vals...) +function Base.Matrix(T::ITensor{N},i1::Index,i2::Index) where {N} + N≠2 && throw(DimensionMismatch("ITensor must be order 2 to convert to a Matrix")) + return Array(T,i1,i2) end -function getindex(T::ITensor,ivs::IndexVal...) - p = calculate_permutation(inds(T),ivs) +Base.getindex(T::ITensor{N},vals::Vararg{Int,N}) where {N} = Tensor(T)[vals...] + +function Base.getindex(T::ITensor{N},ivs::Vararg{IndexVal,N}) where {N} + p = getperm(inds(T),ivs) vals = val.(ivs)[p] - return getindex(T,vals...) + return T[vals...] end -function getindex(T::ITensor,ivs::Union{IndexVal, AbstractVector{IndexVal}}...) - p = calculate_permutation(inds(T),map(x->x isa IndexVal ? x : x[1], ivs)) - vals = map(x->x isa IndexVal ? val(x) : val.(x), ivs[p]) - return storage_getindex(store(T),inds(T),vals...) -end +# TODO: what is this? +#function getindex(T::ITensor,ivs::Union{IndexVal, AbstractVector{IndexVal}}...) +# p = getperm(inds(T),map(x->x isa IndexVal ? x : x[1], ivs)) +# vals = map(x->x isa IndexVal ? val(x) : val.(x), ivs[p]) +# return Tensor(store(T),inds(T))[vals...] +#end -getindex(T::ITensor) = scalar(T) +Base.getindex(T::ITensor) = Tensor(T)[] -setindex!(T::ITensor,x::Number,vals::Int...) = storage_setindex!(store(T),inds(T),x,vals...) +Base.setindex!(T::ITensor{N},x::Number,vals::Vararg{Int,N}) where {N} = (Tensor(T)[vals...] = x) -function setindex!(T::ITensor,x::Number,ivs::IndexVal...) - p = calculate_permutation(inds(T),ivs) +function Base.setindex!(T::ITensor,x::Number,ivs::IndexVal...) + p = getperm(inds(T),ivs) vals = val.(ivs)[p] - return setindex!(T,x,vals...) 
-end - -function setindex!(T::ITensor, - x::Union{<:Number, AbstractArray{<:Number}}, - ivs::Union{IndexVal, AbstractVector{IndexVal}}...) - remap_ivs = map(x->x isa IndexVal ? x : x[1], ivs) - p = calculate_permutation(inds(T),remap_ivs) - vals = map(x->x isa IndexVal ? val(x) : val.(x), ivs[p]) - storage_setindex!(store(T),inds(T),x,vals...) - return T -end - -function fill!(T::ITensor, - x::Number) - # TODO: automatically switch storage type if needed - storage_fill!(store(T),x) + return T[vals...] = x +end + +#function setindex!(T::ITensor, +# x::Union{<:Number, AbstractArray{<:Number}}, +# ivs::Union{IndexVal, AbstractVector{IndexVal}}...) +# remap_ivs = map(x->x isa IndexVal ? x : x[1], ivs) +# p = getperm(inds(T),remap_ivs) +# vals = map(x->x isa IndexVal ? val(x) : val.(x), ivs[p]) +# storage_setindex!(store(T),inds(T),x,vals...) +# return T +#end + +function Base.fill!(T::ITensor, + x::Number) + # TODO: automatically switch storage type if needed? + Tensor(T) .= x return T end +# TODO: implement in terms of delta tensors (better for QNs) function replaceindex!(A::ITensor,i::Index,j::Index) pos = indexpositions(A,i) inds(A)[pos[1]] = j @@ -330,30 +332,31 @@ settags(A::ITensor,vargs...) = ITensor(settags(inds(A),vargs...),store(A)) swaptags(A::ITensor,vargs...) = ITensor(swaptags(inds(A),vargs...),store(A)) -function ==(A::ITensor,B::ITensor) +# TODO: implement in a better way (more generically for other storage) +function Base.:(==)(A::ITensor,B::ITensor) !hassameinds(A,B) && return false - p = calculate_permutation(inds(B),inds(A)) + #IndexVal.(inds(A),Tuple(i)) + p = getperm(inds(B),inds(A)) for i ∈ CartesianIndices(dims(A)) A[Tuple(i)...] ≠ B[Tuple(i)[p]...] && return false end return true end -function isapprox(A::ITensor, - B::ITensor; - atol::Real=0.0, - rtol::Real=Base.rtoldefault(eltype(A),eltype(B),atol)) +function Base.isapprox(A::ITensor, + B::ITensor; + atol::Real=0.0, + rtol::Real=Base.rtoldefault(eltype(A),eltype(B),atol)) return norm(A-B) <= atol + rtol*max(norm(A),norm(B)) end -function scalar(T::ITensor) - !(order(T)==0 || dim(T)==1) && throw(ArgumentError("ITensor with inds $(inds(T)) is not a scalar")) - return storage_scalar(store(T)) -end +#function scalar(T::ITensor) +# !(order(T)==0 || dim(T)==1) && throw(ArgumentError("ITensor with inds $(inds(T)) is not a scalar")) +# return scalar(Tensor(store(T),inds(T))) +#end -function randn!(T::ITensor) - storage_randn!(store(T)) - return T +function Random.randn!(T::ITensor) + return randn!(Tensor(store(T),inds(T))) end const Indices = Union{IndexSet,Tuple{Vararg{Index}}} @@ -377,52 +380,59 @@ combiner(inds::Index...; kwargs...) = combiner(IndexSet(inds...); kwargs...) combinedindex(T::ITensor) = store(T) isa CombinerStorage ? store(T).ci : Index() -norm(T::ITensor) = storage_norm(store(T)) -dag(T::ITensor) = ITensor(storage_dag(store(T),inds(T))...) +LinearAlgebra.norm(T::ITensor) = norm(Tensor(T)) -function permute(T::ITensor,permTinds) - permTis = IndexSet(permTinds) - permTstore = typeof(store(T))(dim(T)) - storage_permute!(permTstore,permTis,store(T),inds(T)) - return ITensor(permTis,permTstore) +function dag(T::ITensor) + TT = conj(Tensor(T)) + return ITensor(dag(inds(T)),store(TT)) +end + +function permute(T::ITensor,new_inds) + #permTis = IndexSet(permTinds) + #permTstore = typeof(store(T))(dim(T)) + #storage_permute!(permTstore,permTis,store(T),inds(T)) + perm = getperm(inds(T),new_inds) + Tp = permutedims(Tensor(store(T),inds(T)),perm) + return ITensor(Tp) end permute(T::ITensor,inds::Index...) 
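# (Illustrative note on the label convention used by * further below:
# compute_contraction_labels gives each index shared between A and B a
# negative label, marking it as summed over, and each uncontracted index a
# positive label. E.g. for A with inds (i,j) and B with inds (j,k), a
# labeling like Alabels=(1,-1), Blabels=(-1,2) contracts over j.)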
= permute(T,IndexSet(inds...)) -function *(A::ITensor,x::Number) - storeB = storage_mult(store(A), x) - return ITensor(inds(A),storeB) +function Base.:*(T::ITensor,x::Number) + return ITensor(x*Tensor(T)) end -*(x::Number,A::ITensor) = A*x +Base.:*(x::Number,T::ITensor) = T*x #TODO: make a proper element-wise division -/(A::ITensor,x::Number) = A*(1.0/x) +Base.:/(A::ITensor,x::Number) = A*(1.0/x) --(A::ITensor) = -one(eltype(A))*A -function +(A::ITensor,B::ITensor) +Base.:-(A::ITensor) = ITensor(-Tensor(A)) +function Base.:+(A::ITensor,B::ITensor) C = copy(A) add!(C,B) return C end -function -(A::ITensor,B::ITensor) +function Base.:-(A::ITensor,B::ITensor) C = copy(A) add!(C,-1,B) return C end -#TODO: Add special case of A==B -#A==B && return ITensor(norm(A)^2) -#TODO: Add more of the contraction logic here? -#We can move the logic of getting the integer labels, -#etc. since they are generic for all storage types function *(A::ITensor,B::ITensor) - (Cis,Cstore) = storage_contract(store(A),inds(A),store(B),inds(B)) - C = ITensor(Cis,Cstore) + (Alabels,Blabels) = compute_contraction_labels(inds(A),inds(B)) + # TODO: implement contract + CT = contract(Tensor(A),Alabels,Tensor(B),Blabels) + C = ITensor(CT) + + # OLD INTERFACE + ###(Cis,Cstore) = storage_contract(store(A),inds(A),store(B),inds(B)) + ###C = ITensor(Cis,Cstore) + if warnTensorOrder > 0 && order(C) >= warnTensorOrder println("Warning: contraction resulted in ITensor with $(order(C)) indices") end return C end -dot(A::ITensor,B::ITensor) = scalar(dag(A)*B) +dot(A::ITensor,B::ITensor) = (dag(A)*B)[] ####################################################################### # @@ -444,9 +454,10 @@ Copy the contents of ITensor A into ITensor B. B .= A ``` """ -function copyto!(A::ITensor,B::ITensor) - storage_copyto!(store(A),inds(A),store(B),inds(B)) - return A +function Base.copyto!(R::ITensor{N},T::ITensor{N}) where {N} + perm = getperm(inds(R),inds(T)) + permutedims!(Tensor(R),Tensor(T),perm) + return R end """ @@ -459,20 +470,15 @@ B .+= A B .+= α .* A ``` """ -function add!(B::ITensor,A::ITensor) - # TODO: is replacing the storage entirely the best way - # to do this logic? Is this worse in the case that - # the storage type stays the same? - B.store = storage_add!(store(B),inds(B),store(A),inds(A)) - return B -end +add!(T1::ITensor,T2::ITensor) = add!(T1,1,T2) -function add!(A::ITensor,x::Number,B::ITensor) +function add!(T1::ITensor,α2::Number,T2::ITensor) # TODO: is replacing the storage entirely the best way # to do this logic? Is this worse in the case that # the storage type stays the same? - A.store = storage_add!(store(A),inds(A),store(B),inds(B),x) - return A + perm = getperm(inds(T1),inds(T2)) + permutedims!(Tensor(T1),Tensor(T2),perm,(t1,t2)->t1+α2*t2) + return T1 end """ @@ -483,12 +489,10 @@ Add ITensors α*A and β*B and store the result in A. A .= α .* A .+ β .* B ``` """ -function add!(A::ITensor,y::Number,x::Number,B::ITensor) - # TODO: is replacing the storage entirely the best way - # to do this logic? Is this worse in the case that - # the storage type stays the same? - A.store = storage_add!(y*store(A),inds(A),store(B),inds(B),x) - return A +function add!(R::ITensor,αr::Number,αt::Number,T::ITensor) + perm = getperm(inds(R),inds(T)) + permutedims!(Tensor(R),Tensor(T),perm,(r,t)->αr*r+αt*t) + return R end """ @@ -513,10 +517,12 @@ Scale the ITensor A by x in-place. May also be written `rmul!`. 
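scale!(A,2.0)   # e.g. doubles every element of A (illustrative)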
A .*= x ``` """ -function scale!(A::ITensor,x::Number) - storage_mult!(store(A), x) - return A +function scale!(T::ITensor,x::Number) + TT = Tensor(T) + TT .*= x + return T end +rmul!(T::ITensor,fac::Number) = scale!(T,fac) """ mul!(A::ITensor,x::Number,B::ITensor) @@ -524,13 +530,8 @@ end Scalar multiplication of ITensor B with x, and store the result in A. Like `A .= x .* B`, and equivalent to `add!(A, 0, x, B)`. """ -function mul!(A::ITensor,x::Number,B::ITensor) - storage_copyto!(store(A),inds(A),store(B),inds(B),x) - return A -end -mul!(R::ITensor,T::ITensor,fac::Number) = mul!(R,fac,T) - -rmul!(T::ITensor,fac::Number) = scale!(T,fac) +mul!(R::ITensor,α::Number,T::ITensor) = add!(R,0,α,T) +mul!(R::ITensor,T::ITensor,α::Number) = mul!(R,α,T) function summary(io::IO, T::ITensor) @@ -557,16 +558,13 @@ function show(io::IO, summary(io,T) end -function similar(T::ITensor, - element_type=eltype(T))::ITensor - if element_type != eltype(T) - error("similar(::ITensor) currently only defined for same element type") - end - return copy(T) +function Base.similar(T::ITensor, + element_type=eltype(T)) + return ITensor(similar(Tensor(T),element_type)) end function multSiteOps(A::ITensor, - B::ITensor)::ITensor + B::ITensor) R = copy(A) prime!(R,"Site") R *= B diff --git a/src/sets.jl b/src/sets.jl new file mode 100644 index 0000000000..fa54fd8110 --- /dev/null +++ b/src/sets.jl @@ -0,0 +1,10 @@ + +function isdisjoint(s1,s2) + for i1 ∈ length(s1) + for i2 ∈ length(s2) + s1[i1] == s2[i2] && return false + end + end + return true +end + diff --git a/src/storage/dense.jl b/src/storage/dense.jl deleted file mode 100644 index eaac70bf2b..0000000000 --- a/src/storage/dense.jl +++ /dev/null @@ -1,359 +0,0 @@ -export Dense - -contract_t = 0.0 - -struct Dense{T} <: TensorStorage - data::Vector{T} - Dense{T}(data::Vector) where {T} = new{T}(convert(Vector{T},data)) - Dense{T}(size::Integer) where {T} = new{T}(Vector{T}(undef,size)) - Dense{T}(x::Number,size::Integer) where {T} = new{T}(fill(convert(T,x),size)) - Dense{T}() where {T} = new{T}(Vector{T}()) -end - -data(D::Dense) = D.data -length(D::Dense) = length(data(D)) -eltype(D::Dense) = eltype(data(D)) -Base.getindex(D::Dense,i::Integer) = getindex(data(D),i) -Base.setindex!(D::Dense,x,i::Integer) = setindex!(data(D),x,i) -#TODO: this should do proper promotions of the storage data -#e.g. 
ComplexF64*Dense{Float64} -> Dense{ComplexF64} -*(D::Dense{T},x::S) where {T,S<:Number} = Dense{promote_type(T,S)}(x*data(D)) -*(x::Number,D::Dense) = D*x - -Base.promote_type(::Type{Dense{T1}},::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} - -convert(::Type{Dense{T}},D::Dense) where {T} = Dense{T}(data(D)) - -storage_convert(::Type{Dense{T}},D::Dense,is::IndexSet) where {T} = convert(Dense{T},D) - -# convert to complex -storage_complex(D::Dense{T}) where {T} = Dense{complex(T)}(complex(data(D))) - -copy(D::Dense{T}) where {T} = Dense{T}(copy(data(D))) - -function storage_outer(D1::Dense{T},is1::IndexSet,D2::Dense{S},is2::IndexSet) where {T, S <:Number} - return Dense{promote_type(T,S)}(vec(data(D1)*transpose(data(D2)))),IndexSet(is1,is2) -end - -storage_convert(::Type{Array},D::Dense,is::IndexSet) = reshape(data(D),dims(is)) - -function storage_convert(::Type{Array}, - D::Dense, - ois::IndexSet, - nis::IndexSet) - P = calculate_permutation(nis,ois) - A = reshape(data(D),dims(ois)) - return permutedims(A,P) -end - -storage_fill!(D::Dense,x::Number) = fill!(data(D),x) - -function storage_getindex(Tstore::Dense{T}, - Tis::IndexSet, - vals::Union{Int, AbstractVector{Int}}...) where {T} - return getindex(reshape(data(Tstore),dims(Tis)),vals...) -end - -function storage_setindex!(Tstore::Dense, - Tis::IndexSet, - x::Union{<:Number, AbstractArray{<:Number}}, - vals::Union{Int, AbstractVector{Int}}...) - return setindex!(reshape(data(Tstore),dims(Tis)),x,vals...) -end - -# For use in _add! -using Base.Cartesian: @nexprs, - @ntuple, - @nloops - -# -# A generalized permutedims!(P,B,perm) that also allows -# a function to be applied elementwise (defaults to addition of -# the elements of P and the permuted elements of B) -# -# Based off of the permutedims! implementation in Julia's base: -# https://github.com/JuliaLang/julia/blob/91151ab871c7e7d6689d1cfa793c12062d37d6b6/base/multidimensional.jl#L1355 -# -@generated function _add!(P::Array{T,N}, - B::Array{S,N}, - perm, - f = (x,y)->x+y) where {T,S,N} - quote - Base.checkdims_perm(P, B, perm) - - #calculates all the strides - native_strides = Base.size_to_strides(1, size(B)...) 
- strides_1 = 0 - @nexprs $N d->(strides_{d+1} = native_strides[perm[d]]) - - #Creates offset, because indexing starts at 1 - offset = 1 - sum(@ntuple $N d->strides_{d+1}) - - ind = 1 - @nexprs 1 d->(counts_{$N+1} = strides_{$N+1}) # a trick to set counts_($N+1) - @nloops($N, i, P, - d->(counts_d = strides_d), # PRE - d->(counts_{d+1} += strides_{d+1}), # POST - begin # BODY - sumc = sum(@ntuple $N d->counts_{d+1}) - @inbounds P[ind] = f(P[ind],B[sumc+offset]) - ind += 1 - end) - - return P - end -end - -function _add!(Bstore::Dense, - Bis::IndexSet, - Astore::Dense, - Ais::IndexSet, - x::Number = 1) - p = calculate_permutation(Bis,Ais) - Adata = data(Astore) - Bdata = data(Bstore) - if is_trivial_permutation(p) - if x == 1 - Bdata .+= Adata - else - Bdata .= Bdata .+ x .* Adata - end - else - reshapeBdata = reshape(Bdata,dims(Bis)) - reshapeAdata = reshape(Adata,dims(Ais)) - if x == 1 - _add!(reshapeBdata,reshapeAdata,p) - else - _add!(reshapeBdata,reshapeAdata,p,(a,b)->a+x*b) - end - end -end - -function storage_copyto!(Bstore::Dense, - Bis::IndexSet, - Astore::Dense, - Ais::IndexSet, - x::Number = 1) - p = calculate_permutation(Bis,Ais) - Adata = data(Astore) - Bdata = data(Bstore) - if is_trivial_permutation(p) - if x == 1 - Bdata .= Adata - else - Bdata .= x .* Adata - end - else - reshapeBdata = reshape(Bdata,dims(Bis)) - reshapeAdata = reshape(Adata,dims(Ais)) - if x == 1 - _add!(reshapeBdata,reshapeAdata,p,(a,b)->b) - else - _add!(reshapeBdata,reshapeAdata,p,(a,b)->x*b) - end - end -end - -function storage_mult!(Astore::Dense, - x::Number) - Adata = data(Astore) - rmul!(Adata, x) -end - -function storage_mult(Astore::Dense, - x::Number) - Bstore = copy(Astore) - storage_mult!(Bstore, x) - return Bstore -end - -# For Real storage and complex scalar, promotion -# of the storage is needed -function storage_mult(Astore::Dense{T}, - x::S) where {T<:Real,S<:Complex} - Bstore = convert(Dense{promote_type(S,T)},Astore) - storage_mult!(Bstore, x) - return Bstore -end - - -# TODO: make this a special version of storage_add!() -# Make sure the permutation is optimized -function storage_permute!(Bstore::Dense, Bis::IndexSet, - Astore::Dense, Ais::IndexSet) - p = calculate_permutation(Bis,Ais) - Adata = data(Astore) - Bdata = data(Bstore) - if is_trivial_permutation(p) - Bdata .= Adata - else - reshapeBdata = reshape(Bdata,dims(Bis)) - permutedims!(reshapeBdata,reshape(Adata,dims(Ais)),p) - end -end - -function storage_dag(Astore::Dense,Ais::IndexSet) - return dag(Ais),storage_conj(Astore) -end - -function storage_scalar(D::Dense) - length(D)==1 && return D[1] - throw(ErrorException("Cannot convert Dense -> Number for length of data greater than 1")) -end - -function _contract(Cinds::IndexSet, Clabels::Vector{Int}, - Astore::Dense{SA}, Ainds::IndexSet, Alabels::Vector{Int}, - Bstore::Dense{SB}, Binds::IndexSet, Blabels::Vector{Int}) where {SA<:Number,SB<:Number} - SC = promote_type(SA,SB) - - # Convert the arrays to a common type - # since we will call BLAS - Astore = convert(Dense{SC},Astore) - Bstore = convert(Dense{SC},Bstore) - - Adims = dims(Ainds) - Bdims = dims(Binds) - Cdims = dims(Cinds) - - # Create storage for output tensor - Cstore = Dense{SC}(prod(Cdims)) - - Adata = reshape(data(Astore),Adims) - Bdata = reshape(data(Bstore),Bdims) - Cdata = reshape(data(Cstore),Cdims) - - if(length(Alabels)==0) - contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) - elseif(length(Blabels)==0) - contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) - else - props = 
CProps(Alabels,Blabels,Clabels) - compute!(props,Adata,Bdata,Cdata) - _contract_dense_dense!(Cdata,props,Adata,Bdata) - end - - return Cstore -end - -function storage_svd(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet; - kwargs...) where {T} - maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) - mindim::Int = get(kwargs,:mindim,1) - cutoff::Float64 = get(kwargs,:cutoff,0.0) - absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) - doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - utags::String = get(kwargs,:utags,"Link,u") - vtags::String = get(kwargs,:vtags,"Link,v") - fastSVD::Bool = get(kwargs,:fastSVD,false) - - if fastSVD - MU,MS,MV = svd(reshape(data(Astore),dim(Lis),dim(Ris))) - else - MU,MS,MV = recursiveSVD(reshape(data(Astore),dim(Lis),dim(Ris))) - end - MV = conj!(MV) - - P = MS.^2 - #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff - truncate!(P;mindim=mindim, - maxdim=maxdim, - cutoff=cutoff, - absoluteCutoff=absoluteCutoff, - doRelCutoff=doRelCutoff) - dS = length(P) - if dS < length(MS) - MU = MU[:,1:dS] - resize!(MS,dS) - MV = MV[:,1:dS] - end - - u = Index(dS,utags) - v = settags(u,vtags) - Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) - #TODO: make a diag storage - Sis,Sstore = IndexSet(u,v),Diag{Vector{Float64}}(MS) - Vis,Vstore = IndexSet(Ris...,v),Dense{T}(Vector{T}(vec(MV))) - - return (Uis,Ustore,Sis,Sstore,Vis,Vstore) -end - -function storage_eigen(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet; - kwargs...) where {T} - maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) - mindim::Int = get(kwargs,:mindim,1) - cutoff::Float64 = get(kwargs,:cutoff,0.0) - absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) - doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - tags::TagSet = get(kwargs,:lefttags,"Link,u") - lefttags::TagSet = get(kwargs,:lefttags,tags) - righttags::TagSet = get(kwargs,:righttags,prime(lefttags)) - - dim_left = dim(Lis) - dim_right = dim(Ris) - MD,MU = eigen(Hermitian(reshape(data(Astore),dim_left,dim_right))) - - # Sort by largest to smallest eigenvalues - p = sortperm(MD; rev = true) - MD = MD[p] - MU = MU[:,p] - - #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff - truncate!(MD;maxdim=maxdim, - cutoff=cutoff, - absoluteCutoff=absoluteCutoff, - doRelCutoff=doRelCutoff) - dD = length(MD) - if dD < size(MU,2) - MU = MU[:,1:dD] - end - - #TODO: include truncation parameters as keyword arguments - u = Index(dD,lefttags) - v = settags(u,righttags) - Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) - Dis,Dstore = IndexSet(u,v),Diag{Vector{Float64}}(MD) - return (Uis,Ustore,Dis,Dstore) -end - -# TODO: move this to a general "linear_algebra.jl" file? -# i.e. a common place for custom linear algebra functionality -# for matrices -function polar(A::Matrix) - U,S,V = svd(A) # calls LinearAlgebra.svd() - return U*V',V*Diagonal(S)*V' -end - -function storage_qr(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet; - kwargs...) 
where {T} - tags::TagSet = get(kwargs,:tags,"Link,u") - dim_left = dim(Lis) - dim_right = dim(Ris) - MQ,MP = qr(reshape(data(Astore),dim_left,dim_right)) - dim_middle = min(dim_left,dim_right) - u = Index(dim_middle,tags) - #Must call Matrix() on MQ since the QR decomposition outputs a sparse - #form of the decomposition - Qis,Qstore = IndexSet(Lis...,u),Dense{T}(vec(Matrix(MQ))) - Pis,Pstore = IndexSet(u,Ris...),Dense{T}(vec(Matrix(MP))) - return (Qis,Qstore,Pis,Pstore) -end - -function storage_polar(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet) where {T} - dim_left = dim(Lis) - dim_right = dim(Ris) - MQ,MP = polar(reshape(data(Astore),dim_left,dim_right)) - dim_middle = min(dim_left,dim_right) - Uis = prime(Ris) - Qis,Qstore = IndexSet(Lis...,Uis...),Dense{T}(vec(MQ)) - Pis,Pstore = IndexSet(Uis...,Ris...),Dense{T}(vec(MP)) - return (Qis,Qstore,Pis,Pstore) -end - diff --git a/src/storage/tensorstorage.jl b/src/storage/tensorstorage.jl deleted file mode 100644 index 0448705c33..0000000000 --- a/src/storage/tensorstorage.jl +++ /dev/null @@ -1,67 +0,0 @@ -export data - -abstract type TensorStorage end - -# -# Generic ITensor storage functions -# - -storage_randn!(S::TensorStorage) = randn!(data(S)) -storage_norm(S::TensorStorage) = norm(data(S)) -storage_conj(S::T) where {T<:TensorStorage}= T(conj(data(S))) - -# -# This high level part of contract is generic to different -# storage types -# -# TODO: make this storage_contract!(), where C is pre-allocated. -# This will allow for in-place multiplication -# TODO: optimize the contraction logic so C doesn't get permuted? -# -function storage_contract(Astore::TensorStorage, - Ais::IndexSet, - Bstore::TensorStorage, - Bis::IndexSet) - if length(Ais)==0 - Cis = Bis - Cstore = storage_scalar(Astore)*Bstore - elseif length(Bis)==0 - Cis = Ais - Cstore = storage_scalar(Bstore)*Astore - else - #TODO: check for special case when Ais and Bis are disjoint sets - #I think we should do this analysis outside of storage_contract, at the ITensor level - #(since it is universal for any storage type and just analyzes in indices) - (Alabels,Blabels) = compute_contraction_labels(Ais,Bis) - if is_outer(Alabels,Blabels) - Cstore,Cis = storage_outer(Astore,Ais,Bstore,Bis) - else - (Cis,Clabels) = contract_inds(Ais,Alabels,Bis,Blabels) - Cstore = _contract(Cis,Clabels,Astore,Ais,Alabels,Bstore,Bis,Blabels) - end - end - return (Cis,Cstore) -end - -# Generic outer function that handles proper -# storage promotion -# TODO: should this handle promotion with storage -# type switching? -# TODO: we should combine all of the storage_add! -# outer wrappers into a single call that promotes -# based on the storage type, i.e. 
promote_type(Dense,Diag) -> Dense -function storage_add!(Bstore::BT, - Bis::IndexSet, - Astore::AT, - Ais::IndexSet, - x::Number = 1) where {BT<:TensorStorage,AT<:TensorStorage} - NT = promote_type(AT,BT) - if NT == BT - _add!(Bstore,Bis,Astore,Ais, x) - return Bstore - end - Nstore = storage_convert(NT,Bstore,Bis) - _add!(Nstore,Bis,Astore,Ais, x) - return Nstore -end - diff --git a/src/storage/combiner.jl b/src/tensor/combiner.jl similarity index 100% rename from src/storage/combiner.jl rename to src/tensor/combiner.jl diff --git a/src/storage/contract.jl b/src/tensor/contract.jl similarity index 100% rename from src/storage/contract.jl rename to src/tensor/contract.jl diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl new file mode 100644 index 0000000000..3f787cdcfc --- /dev/null +++ b/src/tensor/dense.jl @@ -0,0 +1,426 @@ +export Dense + +struct Dense{T} <: TensorStorage + data::Vector{T} + Dense{T}(data::Vector) where {T} = new{T}(convert(Vector{T},data)) + Dense(data::Vector{T}) where {T} = new{T}(data) + Dense{T}(size::Integer) where {T} = new{T}(Vector{T}(undef,size)) + Dense{T}(x::Number,size::Integer) where {T} = new{T}(fill(T(x),size)) + Dense(x::T,size::Integer) where {T<:Number} = new{T}(fill(T,size)) + Dense{T}() where {T} = new{T}(Vector{T}()) +end + +data(D::Dense) = D.data + +# Convenient functions for Dense storage type +Base.getindex(D::Dense,i::Integer) = data(D)[i] +Base.setindex!(D::Dense,v,i::Integer) = (data(D)[i] = v) +Base.similar(D::Dense) = Dense(similar(data(D))) +Base.similar(D::Dense,::Type{S}) where {S} = Dense(similar(data(D),S)) +Base.copy(D::Dense{T}) where {T} = Dense{T}(copy(data(D))) +Base.eltype(D::Dense{T}) where {T} = eltype(T) +Base.eltype(D::Dense{Nothing}) = Nothing +Base.eltype(::Type{Dense{T}}) where {T} = eltype(T) +#Base.length(D::Dense) = length(data(D)) + +# Basic functionality for AbstractArray interface +Base.IndexStyle(::Type{T}) where {T<:Tensor{ElT,N,StoreT}} where {ElT,N,StoreT<:Dense} = IndexLinear() +Base.getindex(T::Tensor{ElT,N,StoreT},i::Integer) where {ElT,N,StoreT<:Dense} = store(T)[i] +Base.setindex!(T::Tensor{ElT,N,StoreT},v,i::Integer) where {ElT,N,StoreT<:Dense} = (store(T)[i] = v) + +# Create an Array that is a view of the Dense Tensor +# Useful for using Base Array functions +function Base.Array(T::Tensor{ElT,N,StoreT}) where {ElT,N,StoreT<:Dense} + return reshape(data(store(T)),dims(inds(T))) +end + +function Base.permutedims!(T1::Tensor{ElT1,N,StoreT1}, + T2::Tensor{ElT2,N,StoreT2}, + perm) where {ElT1,ElT2,N,StoreT1<:Dense,StoreT2<:Dense} + permutedims!(Array(T1),Array(T2),perm) + return T1 +end + +function Base.permutedims(T::Tensor{ElT,N,StoreT}, + perm) where {ElT,N,StoreT<:Dense} + Tp = similar(T,permute(inds(T),perm)) + permutedims!(Tp,T,perm) + return Tp +end + +# For use in custom permutedims! +using Base.Cartesian: @nexprs, + @ntuple, + @nloops + +# +# A generalized permutedims!(P,B,perm) that also allows +# a function to be applied elementwise +# +# Based off of the permutedims! implementation in Julia's base: +# https://github.com/JuliaLang/julia/blob/91151ab871c7e7d6689d1cfa793c12062d37d6b6/base/multidimensional.jl#L1355 +# +@generated function Base.permutedims!(TP::Tensor{ElTP,N,<:Dense}, + T::Tensor{ElT,N,<:Dense}, + perm, + f::Function) where {ElTP,ElT,N} + quote + Base.checkdims_perm(TP, T, perm) + + #calculates all the strides + native_strides = Base.size_to_strides(1, size(T)...) 
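+    # The permuted strides let the loop below read T in the traversal order
+    # of the destination TP. An illustrative example: for size(T) == (2,3,4),
+    # native_strides == (1,2,6); perm == (3,1,2) reorders them to (6,1,2),
+    # so the linear index sumc+offset visits T in TP's element order.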
+ strides_1 = 0 + @nexprs $N d->(strides_{d+1} = native_strides[perm[d]]) + + #Creates offset, because indexing starts at 1 + offset = 1 - sum(@ntuple $N d->strides_{d+1}) + + ind = 1 + @nexprs 1 d->(counts_{$N+1} = strides_{$N+1}) # a trick to set counts_($N+1) + @nloops($N, i, TP, + d->(counts_d = strides_d), # PRE + d->(counts_{d+1} += strides_{d+1}), # POST + begin # BODY + sumc = sum(@ntuple $N d->counts_{d+1}) + @inbounds TP[ind] = f(TP[ind],T[sumc+offset]) + ind += 1 + end) + + return TP + end +end + +# Do we actually need all of these? +#TODO: this should do proper promotions of the storage data +#e.g. ComplexF64*Dense{Float64} -> Dense{ComplexF64} +#*(D::Dense{T},x::S) where {T,S<:Number} = Dense{promote_type(T,S)}(x*data(D)) +#*(x::Number,D::Dense) = D*x + +#Base.promote_type(::Type{Dense{T1}},::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} + +#convert(::Type{Dense{T}},D::Dense) where {T} = Dense{T}(data(D)) + +#storage_convert(::Type{Dense{T}},D::Dense,is::IndexSet) where {T} = convert(Dense{T},D) + +# convert to complex +#storage_complex(D::Dense{T}) where {T} = Dense{complex(T)}(complex(data(D))) + +#function storage_outer(D1::Dense{T},is1::IndexSet,D2::Dense{S},is2::IndexSet) where {T, S <:Number} +# return Dense{promote_type(T,S)}(vec(data(D1)*transpose(data(D2)))),IndexSet(is1,is2) +#end + +#storage_convert(::Type{Array},D::Dense,is::IndexSet) = reshape(data(D),dims(is)) + +#function storage_convert(::Type{Array}, +# D::Dense, +# ois::IndexSet, +# nis::IndexSet) +# P = calculate_permutation(nis,ois) +# A = reshape(data(D),dims(ois)) +# return permutedims(A,P) +#end + +#storage_fill!(D::Dense,x::Number) = fill!(data(D),x) +# +#function storage_getindex(Tstore::Dense{T}, +# Tis::IndexSet, +# vals::Union{Int, AbstractVector{Int}}...) where {T} +# return getindex(reshape(data(Tstore),dims(Tis)),vals...) +#end +# +#function storage_setindex!(Tstore::Dense, +# Tis::IndexSet, +# x::Union{<:Number, AbstractArray{<:Number}}, +# vals::Union{Int, AbstractVector{Int}}...) +# return setindex!(reshape(data(Tstore),dims(Tis)),x,vals...) 
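# For orientation, a sketch of how the permutedims!-with-f primitive above
# replaces these old storage_* helpers (compare itensor.jl in this patch):
#   add!(R,α,T)   -> permutedims!(Tensor(R),Tensor(T),perm,(r,t)->r+α*t)
#   copyto!(R,T)  -> permutedims!(Tensor(R),Tensor(T),perm)
#   mul!(R,α,T)   -> add!(R,0,α,T), i.e. f = (r,t)->α*t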
+#end + +#function _add!(Bstore::Dense, +# Bis::IndexSet, +# Astore::Dense, +# Ais::IndexSet, +# x::Number = 1) +# p = getperm(Bis,Ais) +# Adata = data(Astore) +# Bdata = data(Bstore) +# if is_trivial_permutation(p) +# if x == 1 +# Bdata .+= Adata +# else +# Bdata .= Bdata .+ x .* Adata +# end +# else +# reshapeBdata = reshape(Bdata,dims(Bis)) +# reshapeAdata = reshape(Adata,dims(Ais)) +# if x == 1 +# _add!(reshapeBdata,reshapeAdata,p) +# else +# _add!(reshapeBdata,reshapeAdata,p,(a,b)->a+x*b) +# end +# end +#end + +#function storage_copyto!(Bstore::Dense, +# Bis::IndexSet, +# Astore::Dense, +# Ais::IndexSet, +# x::Number = 1) +# p = getperm(Bis,Ais) +# Adata = data(Astore) +# Bdata = data(Bstore) +# if is_trivial_permutation(p) +# if x == 1 +# Bdata .= Adata +# else +# Bdata .= x .* Adata +# end +# else +# reshapeBdata = reshape(Bdata,dims(Bis)) +# reshapeAdata = reshape(Adata,dims(Ais)) +# if x == 1 +# _add!(reshapeBdata,reshapeAdata,p,(a,b)->b) +# else +# _add!(reshapeBdata,reshapeAdata,p,(a,b)->x*b) +# end +# end +#end + +#function storage_mult!(Astore::Dense, +# x::Number) +# Adata = data(Astore) +# rmul!(Adata, x) +#end + +#function storage_mult(Astore::Dense, +# x::Number) +# Bstore = copy(Astore) +# storage_mult!(Bstore, x) +# return Bstore +#end + +# For Real storage and complex scalar, promotion +# of the storage is needed +#function storage_mult(Astore::Dense{T}, +# x::S) where {T<:Real,S<:Complex} +# Bstore = convert(Dense{promote_type(S,T)},Astore) +# storage_mult!(Bstore, x) +# return Bstore +#end + +## TODO: make this a special version of storage_add!() +## Make sure the permutation is optimized +#function storage_permute!(Bstore::Dense, Bis::IndexSet, +# Astore::Dense, Ais::IndexSet) +# p = calculate_permutation(Bis,Ais) +# Adata = data(Astore) +# Bdata = data(Bstore) +# if is_trivial_permutation(p) +# Bdata .= Adata +# else +# reshapeBdata = reshape(Bdata,dims(Bis)) +# permutedims!(reshapeBdata,reshape(Adata,dims(Ais)),p) +# end +#end + +#function storage_dag(Astore::Dense,Ais::IndexSet) +# return dag(Ais),storage_conj(Astore) +#end + +#function storage_scalar(D::Dense) +# length(D)==1 && return D[1] +# throw(ErrorException("Cannot convert Dense -> Number for length of data greater than 1")) +#end + +function contract!(R::Tensor{ElR,NR,<:Dense},labelsR, + T1::Tensor{ElT1,N1,<:Dense},labels1, + T2::Tensor{ElT2,N2,<:Dense},labels2) where {ElR,ElT1,ElT2,NR,N1,N2} + if N1==0 + # TODO: replace with an add! function? + # What about doing `R .= T1[] .* PermutedDimsArray(T2,perm)`? + perm = getperm(labelsR,labels2) + permutedims!(R,T2,perm,(r,t2)->T1[]*t2) + elseif N2==0 + perm = getperm(labelsR,labels1) + permutedims!(R,T1,perm,(r,t1)->T2[]*t1) + elseif isdisjoint(labels1,labels2) + # TODO: replace with a lazy kron? + # Can this be done with one operation that permutes T1 and T2 + # and overwrites R? + # TODO: permute T1 and T2 appropriately first (can be more efficient + # then permuting the result of T1⊗T2) + Rp = kron(T1,T2) + labelsRp = union(labels1,labels2) + perm = getperm(labelsR,labelsRp) + if !istrivial(perm) + permutedims!(R,Rp,perm) + else + copyto!(R,Rp) + end + else + _contract!(R,labelsR,T1,labelsT1,T2,labelsT2) + end + return R +end + +function _contract!(R::Tensor{ElR,NR,<:Dense},labelsR, + T1::Tensor{ElT1,N1,<:Dense},labels1, + T2::Tensor{ElT2,N2,<:Dense},labels2) where {ElR,ElT1,ElT2,NR,N1,N2} + error("_contract!(...) 
not yet implemented") +end + +#function _contract(Cinds::IndexSet, Clabels::Vector{Int}, +# Astore::Dense{SA}, Ainds::IndexSet, Alabels::Vector{Int}, +# Bstore::Dense{SB}, Binds::IndexSet, Blabels::Vector{Int}) where {SA<:Number,SB<:Number} +# SC = promote_type(SA,SB) +# +# # Convert the arrays to a common type +# # since we will call BLAS +# Astore = convert(Dense{SC},Astore) +# Bstore = convert(Dense{SC},Bstore) +# +# Adims = dims(Ainds) +# Bdims = dims(Binds) +# Cdims = dims(Cinds) +# +# # Create storage for output tensor +# Cstore = Dense{SC}(prod(Cdims)) +# +# Adata = reshape(data(Astore),Adims) +# Bdata = reshape(data(Bstore),Bdims) +# Cdata = reshape(data(Cstore),Cdims) +# +# if(length(Alabels)==0) +# contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) +# elseif(length(Blabels)==0) +# contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) +# else +# props = CProps(Alabels,Blabels,Clabels) +# compute!(props,Adata,Bdata,Cdata) +# _contract_dense_dense!(Cdata,props,Adata,Bdata) +# end +# +# return Cstore +#end + +function storage_svd(Astore::Dense{T}, + Lis::IndexSet, + Ris::IndexSet; + kwargs...) where {T} + maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) + mindim::Int = get(kwargs,:mindim,1) + cutoff::Float64 = get(kwargs,:cutoff,0.0) + absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) + doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) + utags::String = get(kwargs,:utags,"Link,u") + vtags::String = get(kwargs,:vtags,"Link,v") + fastSVD::Bool = get(kwargs,:fastSVD,false) + + if fastSVD + MU,MS,MV = svd(reshape(data(Astore),dim(Lis),dim(Ris))) + else + MU,MS,MV = recursiveSVD(reshape(data(Astore),dim(Lis),dim(Ris))) + end + MV = conj!(MV) + + P = MS.^2 + #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff + truncate!(P;mindim=mindim, + maxdim=maxdim, + cutoff=cutoff, + absoluteCutoff=absoluteCutoff, + doRelCutoff=doRelCutoff) + dS = length(P) + if dS < length(MS) + MU = MU[:,1:dS] + resize!(MS,dS) + MV = MV[:,1:dS] + end + + u = Index(dS,utags) + v = settags(u,vtags) + Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) + #TODO: make a diag storage + Sis,Sstore = IndexSet(u,v),Diag{Vector{Float64}}(MS) + Vis,Vstore = IndexSet(Ris...,v),Dense{T}(Vector{T}(vec(MV))) + + return (Uis,Ustore,Sis,Sstore,Vis,Vstore) +end + +function storage_eigen(Astore::Dense{T}, + Lis::IndexSet, + Ris::IndexSet; + kwargs...) 
where {T} + maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) + mindim::Int = get(kwargs,:mindim,1) + cutoff::Float64 = get(kwargs,:cutoff,0.0) + absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) + doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) + tags::TagSet = get(kwargs,:lefttags,"Link,u") + lefttags::TagSet = get(kwargs,:lefttags,tags) + righttags::TagSet = get(kwargs,:righttags,prime(lefttags)) + + dim_left = dim(Lis) + dim_right = dim(Ris) + MD,MU = eigen(Hermitian(reshape(data(Astore),dim_left,dim_right))) + + # Sort by largest to smallest eigenvalues + p = sortperm(MD; rev = true) + MD = MD[p] + MU = MU[:,p] + + #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff + truncate!(MD;maxdim=maxdim, + cutoff=cutoff, + absoluteCutoff=absoluteCutoff, + doRelCutoff=doRelCutoff) + dD = length(MD) + if dD < size(MU,2) + MU = MU[:,1:dD] + end + + #TODO: include truncation parameters as keyword arguments + u = Index(dD,lefttags) + v = settags(u,righttags) + Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) + Dis,Dstore = IndexSet(u,v),Diag{Vector{Float64}}(MD) + return (Uis,Ustore,Dis,Dstore) +end + +# TODO: move this to a general "linear_algebra.jl" file? +# i.e. a common place for custom linear algebra functionality +# for matrices +function polar(A::Matrix) + U,S,V = svd(A) # calls LinearAlgebra.svd() + return U*V',V*Diagonal(S)*V' +end + +function storage_qr(Astore::Dense{T}, + Lis::IndexSet, + Ris::IndexSet; + kwargs...) where {T} + tags::TagSet = get(kwargs,:tags,"Link,u") + dim_left = dim(Lis) + dim_right = dim(Ris) + MQ,MP = qr(reshape(data(Astore),dim_left,dim_right)) + dim_middle = min(dim_left,dim_right) + u = Index(dim_middle,tags) + #Must call Matrix() on MQ since the QR decomposition outputs a sparse + #form of the decomposition + Qis,Qstore = IndexSet(Lis...,u),Dense{T}(vec(Matrix(MQ))) + Pis,Pstore = IndexSet(u,Ris...),Dense{T}(vec(Matrix(MP))) + return (Qis,Qstore,Pis,Pstore) +end + +function storage_polar(Astore::Dense{T}, + Lis::IndexSet, + Ris::IndexSet) where {T} + dim_left = dim(Lis) + dim_right = dim(Ris) + MQ,MP = polar(reshape(data(Astore),dim_left,dim_right)) + dim_middle = min(dim_left,dim_right) + Uis = prime(Ris) + Qis,Qstore = IndexSet(Lis...,Uis...),Dense{T}(vec(MQ)) + Pis,Pstore = IndexSet(Uis...,Ris...),Dense{T}(vec(MP)) + return (Qis,Qstore,Pis,Pstore) +end + diff --git a/src/storage/diag.jl b/src/tensor/diag.jl similarity index 100% rename from src/storage/diag.jl rename to src/tensor/diag.jl diff --git a/src/storage/svd.jl b/src/tensor/svd.jl similarity index 100% rename from src/storage/svd.jl rename to src/tensor/svd.jl diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl new file mode 100644 index 0000000000..1376a8b0d8 --- /dev/null +++ b/src/tensor/tensor.jl @@ -0,0 +1,59 @@ +export Tensor, + inds, + store + +""" +Tensor{StoreT,IndsT} + +A plain old tensor (with order independent +interface and no assumption of labels) +""" +struct Tensor{T,N,StoreT,IndsT} <: AbstractArray{T,N} + store::StoreT + inds::IndsT + function Tensor(store::StoreT,inds::IndsT) where {StoreT,IndsT} + new{eltype(StoreT),length(inds),StoreT,IndsT}(store,inds) + end +end + +store(T::Tensor) = T.store +inds(T::Tensor) = T.inds + +# This function is used in the AbstractArray interface +#Base.axes(T::Tensor) = Tuple(inds(T)) + +dims(ds::Dims) = ds + +# The size is obtained from the indices +function Base.size(T::Tensor{StoreT,IndsT}) where {StoreT,IndsT} + return dims(inds(T)) +end + +Base.copy(T::Tensor) = Tensor(copy(store(T)),copy(inds(T))) + 
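+# With the Broadcast.ArrayStyle machinery defined below, a Tensor supports
+# dotted operations directly; an illustrative sketch:
+#   T = Tensor(Dense{Float64}(0.0,6),(2,3))
+#   T .= 1.0     # fill through broadcast (what fill! on an ITensor uses)
+#   T .*= 2.0    # in-place scaling (what scale!/rmul! on an ITensor use)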
+Base.similar(T::Tensor) = Tensor(similar(store(T)),inds(T)) +Base.similar(T::Tensor,dims) where {S} = Tensor(similar(store(T)),dims) +Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) +Base.similar(T::Tensor,::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) + +Base.BroadcastStyle(::Type{T}) where {T<:Tensor} = Broadcast.ArrayStyle{T}() + +function Base.similar(bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{T}}, + ::Type{ElType}) where {T<:Tensor,ElType} + A = find_tensor(bc) + return similar(A) +end + +# This is used for overloading broadcast +"`A = find_tensor(As)` returns the first Tensor among the arguments." +find_tensor(bc::Base.Broadcast.Broadcasted) = find_tensor(bc.args) +find_tensor(args::Tuple) = find_tensor(find_tensor(args[1]), Base.tail(args)) +find_tensor(x) = x +find_tensor(a::Tensor, rest) = a +find_tensor(::Any, rest) = find_tensor(rest) + +#Base.similar(T::Tensor) = Tensor(similar(store(T)),copy(inds(T))) +#Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T)),copy(inds(T))) + +#Base.getindex(A::TensorT, i::Int) where {TensorT<:Tensor} = error("getindex not yet implemented for Tensor type $TensorT") + diff --git a/src/tensor/tensorstorage.jl b/src/tensor/tensorstorage.jl new file mode 100644 index 0000000000..928b62fe0a --- /dev/null +++ b/src/tensor/tensorstorage.jl @@ -0,0 +1,67 @@ +export data + +abstract type TensorStorage end + +# +# Generic ITensor storage functions +# + +#storage_randn!(S::TensorStorage) = randn!(data(S)) +#storage_norm(S::TensorStorage) = norm(data(S)) +#storage_conj(S::T) where {T<:TensorStorage}= T(conj(data(S))) + +# +# This high level part of contract is generic to different +# storage types +# +# TODO: make this storage_contract!(), where C is pre-allocated. +# This will allow for in-place multiplication +# TODO: optimize the contraction logic so C doesn't get permuted? +# +#function storage_contract(Astore::TensorStorage, +# Ais::IndexSet, +# Bstore::TensorStorage, +# Bis::IndexSet) +# if length(Ais)==0 +# Cis = Bis +# Cstore = storage_scalar(Astore)*Bstore +# elseif length(Bis)==0 +# Cis = Ais +# Cstore = storage_scalar(Bstore)*Astore +# else +# #TODO: check for special case when Ais and Bis are disjoint sets +# #I think we should do this analysis outside of storage_contract, at the ITensor level +# #(since it is universal for any storage type and just analyzes in indices) +# (Alabels,Blabels) = compute_contraction_labels(Ais,Bis) +# if is_outer(Alabels,Blabels) +# Cstore,Cis = storage_outer(Astore,Ais,Bstore,Bis) +# else +# (Cis,Clabels) = contract_inds(Ais,Alabels,Bis,Blabels) +# Cstore = _contract(Cis,Clabels,Astore,Ais,Alabels,Bstore,Bis,Blabels) +# end +# end +# return (Cis,Cstore) +#end + +# Generic outer function that handles proper +# storage promotion +# TODO: should this handle promotion with storage +# type switching? +# TODO: we should combine all of the storage_add! +# outer wrappers into a single call that promotes +# based on the storage type, i.e. 
promote_type(Dense,Diag) -> Dense +#function storage_add!(Bstore::BT, +# Bis::IndexSet, +# Astore::AT, +# Ais::IndexSet, +# x::Number = 1) where {BT<:TensorStorage,AT<:TensorStorage} +# NT = promote_type(AT,BT) +# if NT == BT +# _add!(Bstore,Bis,Astore,Ais, x) +# return Bstore +# end +# Nstore = storage_convert(NT,Bstore,Bis) +# _add!(Nstore,Bis,Astore,Ais, x) +# return Nstore +#end + diff --git a/test/test_itensor.jl b/test/test_itensor_dense.jl similarity index 96% rename from test/test_itensor.jl rename to test/test_itensor_dense.jl index 47109e755c..ca98e638f0 100644 --- a/test/test_itensor.jl +++ b/test/test_itensor_dense.jl @@ -7,6 +7,8 @@ Random.seed!(12345) digits(::Type{T},i,j,k) where {T} = T(i*10^2+j*10+k) +@testset "Dense ITensor basic functionality" begin + @testset "ITensor constructors" begin i = Index(2,"i") j = Index(2,"j") @@ -127,7 +129,8 @@ end A = randomITensor(i,j) B = similar(A) @test inds(B) == inds(A) - @test_throws ErrorException similar(A, ComplexF32) + Ac = similar(A, ComplexF32) + @test store(Ac) isa Dense{ComplexF32} end @testset "fill!" begin @@ -387,17 +390,18 @@ end for ii ∈ 1:dim(i), jj ∈ 1:dim(j), kk ∈ 1:dim(k) @test A[k(kk),i(ii),j(jj)]==permA[i(ii),j(jj),k(kk)] end + # TODO: why do we need this? can't we just splat? @testset "getindex and setindex with vector of IndexVals" begin k_inds = [k(kk) for kk ∈ 1:dim(k)] for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - @test A[k_inds,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds] + @test A[k_inds...,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds...] end for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - A[k_inds,i(ii),j(jj)]=collect(1:length(k_inds)) + A[k_inds...,i(ii),j(jj)]=collect(1:length(k_inds)) end permA = permute(A,k,j,i) for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - @test A[k_inds,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds] + @test A[k_inds...,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds...] 
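+      # (vectors of IndexVals are now splatted into individual IndexVals;
+      # the old vector-of-IndexVals overloads are commented out in itensor.jl)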
end end end @@ -488,4 +492,5 @@ end end # End ITensor factorization testset end # End Dense storage test +end # End Dense ITensor basic functionality diff --git a/test/test_diagITensor.jl b/test/test_itensor_diag.jl similarity index 100% rename from test/test_diagITensor.jl rename to test/test_itensor_diag.jl From 9a77db2a9cc8983ec84e21416450579b38a1b91d Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Fri, 30 Aug 2019 15:27:43 -0400 Subject: [PATCH 02/27] Working on Dense-Dense contraction --- src/sets.jl | 4 ++-- src/tensor/dense.jl | 53 +++++++++++++++++++++++++++++--------------- src/tensor/tensor.jl | 16 +++++++++---- 3 files changed, 49 insertions(+), 24 deletions(-) diff --git a/src/sets.jl b/src/sets.jl index fa54fd8110..842375c596 100644 --- a/src/sets.jl +++ b/src/sets.jl @@ -1,7 +1,7 @@ function isdisjoint(s1,s2) - for i1 ∈ length(s1) - for i2 ∈ length(s2) + for i1 ∈ 1:length(s1) + for i2 ∈ 1:length(s2) s1[i1] == s2[i2] && return false end end diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 3f787cdcfc..584615ecbe 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -15,10 +15,16 @@ data(D::Dense) = D.data # Convenient functions for Dense storage type Base.getindex(D::Dense,i::Integer) = data(D)[i] Base.setindex!(D::Dense,v,i::Integer) = (data(D)[i] = v) -Base.similar(D::Dense) = Dense(similar(data(D))) -Base.similar(D::Dense,::Type{S}) where {S} = Dense(similar(data(D),S)) + +Base.similar(D::Dense{T}) where {T} = Dense{T}(length(D)) +Base.similar(D::Dense{T},dims) where {T} = Dense{T}(dim(dims)) +Base.similar(::Type{Dense{T}},dims) where {T} = Dense{T}(dim(dims)) +Base.similar(D::Dense{T},::Type{S}) where {T,S} = Dense{S}(length(D)) Base.copy(D::Dense{T}) where {T} = Dense{T}(copy(data(D))) + Base.eltype(D::Dense{T}) where {T} = eltype(T) +# This is necessary since for some reason inference doesn't work +# with the more general definition (eltype(Nothing) === Any) Base.eltype(D::Dense{Nothing}) = Nothing Base.eltype(::Type{Dense{T}}) where {T} = eltype(T) #Base.length(D::Dense) = length(data(D)) @@ -30,9 +36,8 @@ Base.setindex!(T::Tensor{ElT,N,StoreT},v,i::Integer) where {ElT,N,StoreT<:Dense} # Create an Array that is a view of the Dense Tensor # Useful for using Base Array functions -function Base.Array(T::Tensor{ElT,N,StoreT}) where {ElT,N,StoreT<:Dense} - return reshape(data(store(T)),dims(inds(T))) -end +Base.Array(T::Tensor) = reshape(data(store(T)),dims(inds(T))) +Base.Vector(T::Tensor) = vec(Array(T)) function Base.permutedims!(T1::Tensor{ElT1,N,StoreT1}, T2::Tensor{ElT2,N,StoreT2}, @@ -90,6 +95,11 @@ using Base.Cartesian: @nexprs, end end +function outer(T1::Tensor,T2::Tensor) + return Tensor(Dense(vec(Vector(T1)*transpose(Vector(T2)))),union(inds(T1),inds(T2))) +end +const ⊗ = outer + # Do we actually need all of these? #TODO: this should do proper promotions of the storage data #e.g. 
ComplexF64*Dense{Float64} -> Dense{ComplexF64} @@ -231,25 +241,30 @@ end # throw(ErrorException("Cannot convert Dense -> Number for length of data greater than 1")) #end +function contract(T1::Tensor{ElT1,N1,<:Dense},labelsT1, + T2::Tensor{ElT2,N2,<:Dense},labelsT2) where {ElT1,ElT2,N1,N2} + indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) + R = similar(promote_type(typeof(T1),typeof(T2)),indsR) + contract!(R,labelsR,T1,labelsT1,T2,labelsT2) + return R +end + function contract!(R::Tensor{ElR,NR,<:Dense},labelsR, - T1::Tensor{ElT1,N1,<:Dense},labels1, - T2::Tensor{ElT2,N2,<:Dense},labels2) where {ElR,ElT1,ElT2,NR,N1,N2} + T1::Tensor{ElT1,N1,<:Dense},labelsT1, + T2::Tensor{ElT2,N2,<:Dense},labelsT2) where {ElR,ElT1,ElT2,NR,N1,N2} if N1==0 # TODO: replace with an add! function? # What about doing `R .= T1[] .* PermutedDimsArray(T2,perm)`? - perm = getperm(labelsR,labels2) + perm = getperm(labelsR,labelsT2) permutedims!(R,T2,perm,(r,t2)->T1[]*t2) elseif N2==0 - perm = getperm(labelsR,labels1) + perm = getperm(labelsR,labelsT1) permutedims!(R,T1,perm,(r,t1)->T2[]*t1) - elseif isdisjoint(labels1,labels2) - # TODO: replace with a lazy kron? - # Can this be done with one operation that permutes T1 and T2 - # and overwrites R? + elseif isdisjoint(labelsT1,labelsT2) # TODO: permute T1 and T2 appropriately first (can be more efficient # then permuting the result of T1⊗T2) - Rp = kron(T1,T2) - labelsRp = union(labels1,labels2) + Rp = T1⊗T2 + labelsRp = union(labelsT1,labelsT2) perm = getperm(labelsR,labelsRp) if !istrivial(perm) permutedims!(R,Rp,perm) @@ -263,9 +278,11 @@ function contract!(R::Tensor{ElR,NR,<:Dense},labelsR, end function _contract!(R::Tensor{ElR,NR,<:Dense},labelsR, - T1::Tensor{ElT1,N1,<:Dense},labels1, - T2::Tensor{ElT2,N2,<:Dense},labels2) where {ElR,ElT1,ElT2,NR,N1,N2} - error("_contract!(...) 
not yet implemented") + T1::Tensor{ElT1,N1,<:Dense},labelsT1, + T2::Tensor{ElT2,N2,<:Dense},labelsT2) where {ElR,ElT1,ElT2,NR,N1,N2} + props = CProps(labelsT1,labelsT2,labelsR) + compute!(props,T1,T2,R) + _contract_dense_dense!(R,props,T1,T2) end #function _contract(Cinds::IndexSet, Clabels::Vector{Int}, diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index 1376a8b0d8..f68764dd2c 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -8,7 +8,7 @@ Tensor{StoreT,IndsT} A plain old tensor (with order independent interface and no assumption of labels) """ -struct Tensor{T,N,StoreT,IndsT} <: AbstractArray{T,N} +struct Tensor{ElT,N,StoreT,IndsT} <: AbstractArray{ElT,N} store::StoreT inds::IndsT function Tensor(store::StoreT,inds::IndsT) where {StoreT,IndsT} @@ -23,6 +23,7 @@ inds(T::Tensor) = T.inds #Base.axes(T::Tensor) = Tuple(inds(T)) dims(ds::Dims) = ds +dim(ds::Dims) = prod(ds) # The size is obtained from the indices function Base.size(T::Tensor{StoreT,IndsT}) where {StoreT,IndsT} @@ -31,10 +32,17 @@ end Base.copy(T::Tensor) = Tensor(copy(store(T)),copy(inds(T))) +function Base.similar(T::Type{Tensor{ElT,N,StoreT,IndsT}},dims) where {ElT,N,StoreT,IndsT} + return Tensor(similar(StoreT,dims),dims) +end +#Base.similar(T::Type{<:Tensor},::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) +#Base.similar(T::Type{<:Tensor},::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) + +# TODO: implement these versions Base.similar(T::Tensor) = Tensor(similar(store(T)),inds(T)) -Base.similar(T::Tensor,dims) where {S} = Tensor(similar(store(T)),dims) -Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) -Base.similar(T::Tensor,::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) +Base.similar(T::Tensor,dims) = Tensor(similar(store(T),dims),dims) +#Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) +#Base.similar(T::Tensor,::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) Base.BroadcastStyle(::Type{T}) where {T<:Tensor} = Broadcast.ArrayStyle{T}() From 4c4dc4bc8a46c22343e1bf83bbe8b18224ff4c4d Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Fri, 30 Aug 2019 15:36:51 -0400 Subject: [PATCH 03/27] Basic Dense-Dense contraction working --- src/ITensors.jl | 2 +- src/tensor/contract.jl | 154 ++++++++++++++++++++--------------------- src/tensor/dense.jl | 62 ++++++++++++++++- 3 files changed, 139 insertions(+), 79 deletions(-) diff --git a/src/ITensors.jl b/src/ITensors.jl index a11e360a4b..6b5c4ba3fc 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -66,10 +66,10 @@ include("index.jl") include("indexset.jl") include("tensor/tensor.jl") include("tensor/tensorstorage.jl") +include("tensor/contract.jl") include("tensor/dense.jl") include("tensor/diag.jl") include("tensor/combiner.jl") -include("tensor/contract.jl") include("tensor/svd.jl") include("itensor.jl") include("decomp.jl") diff --git a/src/tensor/contract.jl b/src/tensor/contract.jl index fda4d1792e..a2fa261c2f 100644 --- a/src/tensor/contract.jl +++ b/src/tensor/contract.jl @@ -1,20 +1,20 @@ -# Check if the contraction is an outer product -# (none of the labels are negative) -function is_outer(l1::Vector{Int},l2::Vector{Int}) - for l1i in l1 - if l1i < 0 - return false - end - end - # I think this one is not necessary - for l2i in l2 - if l2i < 0 - return false - end - end - return true -end +## Check if the contraction is an outer product +## (none of the labels are negative) +#function is_outer(l1::Vector{Int},l2::Vector{Int}) +# for 
l1i in l1 +# if l1i < 0 +# return false +# end +# end +# # I think this one is not necessary +# for l2i in l2 +# if l2i < 0 +# return false +# end +# end +# return true +#end mutable struct CProps ai::Vector{Int} @@ -158,9 +158,9 @@ function permute_extents(R::Vector{Int},P::Vector{Int})::Vector{Int} end function compute!(props::CProps, - A::Array, - B::Array, - C::Array) + A, + B, + C) compute_perms!(props) #Use props.PC.size() as a check to see if we've already run this @@ -436,64 +436,64 @@ function compute!(props::CProps, end -function _contract_dense_dense!(C::Array{T}, - p::CProps, - A::Array{T}, - B::Array{T}, - α::T=one(T), - β::T=zero(T)) where {T} - tA = 'N' - if p.permuteA - aref = reshape(permutedims(A,p.PA),p.dmid,p.dleft) - tA = 'T' - else - #A doesn't have to be permuted - if Atrans(p) - aref = reshape(A,p.dmid,p.dleft) - tA = 'T' - else - aref = reshape(A,p.dleft,p.dmid) - end - end - - tB = 'N' - if p.permuteB - bref = reshape(permutedims(B,p.PB),p.dmid,p.dright) - else - if Btrans(p) - bref = reshape(B,p.dright,p.dmid) - tB = 'T' - else - bref = reshape(B,p.dmid,p.dright) - end - end - - # TODO: this logic may be wrong - if p.permuteC - cref = reshape(copy(C),p.dleft,p.dright) - else - if Ctrans(p) - cref = reshape(C,p.dleft,p.dright) - if tA=='N' && tB=='N' - (aref,bref) = (bref,aref) - tA = tB = 'T' - elseif tA=='T' && tB=='T' - (aref,bref) = (bref,aref) - tA = tB = 'N' - end - else - cref = reshape(C,p.dleft,p.dright) - end - end - - #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),aref,bref,promote_type(T,Tβ)(β),cref) - BLAS.gemm!(tA,tB,α,aref,bref,β,cref) - - if p.permuteC - permutedims!(C,reshape(cref,p.newCrange...),p.PC) - end - return -end +#function _contract_dense_dense!(C::Array{T}, +# p::CProps, +# A::Array{T}, +# B::Array{T}, +# α::T=one(T), +# β::T=zero(T)) where {T} +# tA = 'N' +# if p.permuteA +# aref = reshape(permutedims(A,p.PA),p.dmid,p.dleft) +# tA = 'T' +# else +# #A doesn't have to be permuted +# if Atrans(p) +# aref = reshape(A,p.dmid,p.dleft) +# tA = 'T' +# else +# aref = reshape(A,p.dleft,p.dmid) +# end +# end +# +# tB = 'N' +# if p.permuteB +# bref = reshape(permutedims(B,p.PB),p.dmid,p.dright) +# else +# if Btrans(p) +# bref = reshape(B,p.dright,p.dmid) +# tB = 'T' +# else +# bref = reshape(B,p.dmid,p.dright) +# end +# end +# +# # TODO: this logic may be wrong +# if p.permuteC +# cref = reshape(copy(C),p.dleft,p.dright) +# else +# if Ctrans(p) +# cref = reshape(C,p.dleft,p.dright) +# if tA=='N' && tB=='N' +# (aref,bref) = (bref,aref) +# tA = tB = 'T' +# elseif tA=='T' && tB=='T' +# (aref,bref) = (bref,aref) +# tA = tB = 'N' +# end +# else +# cref = reshape(C,p.dleft,p.dright) +# end +# end +# +# #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),aref,bref,promote_type(T,Tβ)(β),cref) +# BLAS.gemm!(tA,tB,α,aref,bref,β,cref) +# +# if p.permuteC +# permutedims!(C,reshape(cref,p.newCrange...),p.PC) +# end +# return +#end #TODO: this should be optimized function _contract_scalar!(Cdata::Array,Clabels::Vector{Int}, diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 584615ecbe..d73d103c4b 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -282,7 +282,67 @@ function _contract!(R::Tensor{ElR,NR,<:Dense},labelsR, T2::Tensor{ElT2,N2,<:Dense},labelsT2) where {ElR,ElT1,ElT2,NR,N1,N2} props = CProps(labelsT1,labelsT2,labelsR) compute!(props,T1,T2,R) - _contract_dense_dense!(R,props,T1,T2) + _contract_dense_dense!(Array(R),props,Array(T1),Array(T2)) + return R +end + +function _contract_dense_dense!(C::Array{T}, + p::CProps, + A::Array{T}, + B::Array{T}, + 
α::T=one(T), + β::T=zero(T)) where {T} + tA = 'N' + if p.permuteA + aref = reshape(permutedims(A,p.PA),p.dmid,p.dleft) + tA = 'T' + else + #A doesn't have to be permuted + if Atrans(p) + aref = reshape(A,p.dmid,p.dleft) + tA = 'T' + else + aref = reshape(A,p.dleft,p.dmid) + end + end + + tB = 'N' + if p.permuteB + bref = reshape(permutedims(B,p.PB),p.dmid,p.dright) + else + if Btrans(p) + bref = reshape(B,p.dright,p.dmid) + tB = 'T' + else + bref = reshape(B,p.dmid,p.dright) + end + end + + # TODO: this logic may be wrong + if p.permuteC + cref = reshape(copy(C),p.dleft,p.dright) + else + if Ctrans(p) + cref = reshape(C,p.dleft,p.dright) + if tA=='N' && tB=='N' + (aref,bref) = (bref,aref) + tA = tB = 'T' + elseif tA=='T' && tB=='T' + (aref,bref) = (bref,aref) + tA = tB = 'N' + end + else + cref = reshape(C,p.dleft,p.dright) + end + end + + #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),aref,bref,promote_type(T,Tβ)(β),cref) + BLAS.gemm!(tA,tB,α,aref,bref,β,cref) + + if p.permuteC + permutedims!(C,reshape(cref,p.newCrange...),p.PC) + end + return end #function _contract(Cinds::IndexSet, Clabels::Vector{Int}, From 559b43f2bd3d817ec70b13990f0b2ed3959e6e92 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Tue, 3 Sep 2019 18:26:05 -0400 Subject: [PATCH 04/27] Most dense tests pass --- src/ITensors.jl | 23 +- src/decomp.jl | 164 ++++----- src/indexset.jl | 129 +++++-- src/itensor.jl | 111 +++--- src/sets.jl | 10 - src/tensor/combiner.jl | 10 +- .../{contract.jl => contraction_logic.jl} | 131 ++++---- src/tensor/dense.jl | 315 +++++++++++------- src/tensor/svd.jl | 15 +- src/tensor/tensor.jl | 15 +- src/tensor/truncate.jl | 76 +++++ test/test_itensor_dense.jl | 44 ++- 12 files changed, 632 insertions(+), 411 deletions(-) delete mode 100644 src/sets.jl rename src/tensor/{contract.jl => contraction_logic.jl} (82%) create mode 100644 src/tensor/truncate.jl diff --git a/src/ITensors.jl b/src/ITensors.jl index 6b5c4ba3fc..f07e41f179 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -49,28 +49,33 @@ import Base.adjoint, ##################################### # Global Variables - +# const warnTensorOrder = 10 ##################################### -# Helper functionality +# Index and IndexSet # - -include("sets.jl") - -##################################### - +# TODO: load these after Tensor functions include("smallstring.jl") include("tagset.jl") include("index.jl") include("indexset.jl") + +##################################### +# Tensor +# include("tensor/tensor.jl") include("tensor/tensorstorage.jl") -include("tensor/contract.jl") +include("tensor/contraction_logic.jl") include("tensor/dense.jl") include("tensor/diag.jl") include("tensor/combiner.jl") +include("tensor/truncate.jl") include("tensor/svd.jl") + +##################################### +# ITensor +# include("itensor.jl") include("decomp.jl") include("iterativesolvers.jl") @@ -95,4 +100,4 @@ include("physics/sitesets/spinone.jl") include("physics/sitesets/electron.jl") include("physics/sitesets/tj.jl") -end # module +end # module ITensors diff --git a/src/decomp.jl b/src/decomp.jl index bdfda12c26..d34cb35374 100644 --- a/src/decomp.jl +++ b/src/decomp.jl @@ -4,80 +4,80 @@ export svd, eigen, factorize -function truncate!(P::Vector{Float64}; - kwargs...)::Tuple{Float64,Float64} - maxdim::Int = min(get(kwargs,:maxdim,length(P)), length(P)) - mindim::Int = max(get(kwargs,:mindim,1), 1) - cutoff::Float64 = max(get(kwargs,:cutoff,0.0), 0.0) - absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) - doRelCutoff::Bool = 
get(kwargs,:doRelCutoff,true) - - origm = length(P) - docut = 0.0 - - if P[1]<=0.0 - P[1] = 0.0 - resize!(P,1) - return 0.,0. - end - - if origm==1 - docut = P[1]/2 - return 0.,docut - end - - #Zero out any negative weight - for n=origm:-1:1 - (P[n] >= 0.0) && break - P[n] = 0.0 - end - - n = origm - truncerr = 0.0 - while n > maxdim - truncerr += P[n] - n -= 1 - end - - if absoluteCutoff - #Test if individual prob. weights fall below cutoff - #rather than using *sum* of discarded weights - while P[n] <= cutoff && n > mindim - truncerr += P[n] - n -= 1 - end - else - scale = 1.0 - if doRelCutoff - scale = sum(P) - (scale==0.0) && (scale = 1.0) - end - - #Continue truncating until *sum* of discarded probability - #weight reaches cutoff reached (or m==mindim) - while (truncerr+P[n] <= cutoff*scale) && (n > mindim) - truncerr += P[n] - n -= 1 - end - - truncerr /= scale - end - - if n < 1 - n = 1 - end - - if n < origm - docut = (P[n]+P[n+1])/2 - if abs(P[n]-P[n+1]) < 1E-3*P[n] - docut += 1E-3*P[n] - end - end - - resize!(P,n) - - return truncerr,docut -end +#function truncate!(P::Vector{Float64}; +# kwargs...)::Tuple{Float64,Float64} +# maxdim::Int = min(get(kwargs,:maxdim,length(P)), length(P)) +# mindim::Int = max(get(kwargs,:mindim,1), 1) +# cutoff::Float64 = max(get(kwargs,:cutoff,0.0), 0.0) +# absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) +# doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) +# +# origm = length(P) +# docut = 0.0 +# +# if P[1]<=0.0 +# P[1] = 0.0 +# resize!(P,1) +# return 0.,0. +# end +# +# if origm==1 +# docut = P[1]/2 +# return 0.,docut +# end +# +# #Zero out any negative weight +# for n=origm:-1:1 +# (P[n] >= 0.0) && break +# P[n] = 0.0 +# end +# +# n = origm +# truncerr = 0.0 +# while n > maxdim +# truncerr += P[n] +# n -= 1 +# end +# +# if absoluteCutoff +# #Test if individual prob. weights fall below cutoff +# #rather than using *sum* of discarded weights +# while P[n] <= cutoff && n > mindim +# truncerr += P[n] +# n -= 1 +# end +# else +# scale = 1.0 +# if doRelCutoff +# scale = sum(P) +# (scale==0.0) && (scale = 1.0) +# end +# +# #Continue truncating until *sum* of discarded probability +# #weight reaches cutoff reached (or m==mindim) +# while (truncerr+P[n] <= cutoff*scale) && (n > mindim) +# truncerr += P[n] +# n -= 1 +# end +# +# truncerr /= scale +# end +# +# if n < 1 +# n = 1 +# end +# +# if n < origm +# docut = (P[n]+P[n+1])/2 +# if abs(P[n]-P[n+1]) < 1E-3*P[n] +# docut += 1E-3*P[n] +# end +# end +# +# resize!(P,n) +# +# return truncerr,docut +#end # Take ITensor A, permute the storage so that # Linds are in front, return the permuted A @@ -102,8 +102,8 @@ function qr(A::ITensor, Linds...) A,Lis,Ris = _permute_for_factorize(A,Linds...) Qis,Qstore,Pis,Pstore = storage_qr(store(A),Lis,Ris) - Q = ITensor(Qis,Qstore) - R = ITensor(Pis,Pstore) + Q = ITensor(Qstore,Qid) + R = ITensor(Pstore,Pis) return Q,R,commonindex(Q,R) end @@ -111,8 +111,8 @@ function polar(A::ITensor, Linds...) A,Lis,Ris = _permute_for_factorize(A,Linds...) Qis,Qstore,Pis,Pstore = storage_polar(store(A),Lis,Ris) - Q = ITensor(Qis,Qstore) - P = ITensor(Pis,Pstore) + Q = ITensor(Qstore,Qis) + P = ITensor(Pstore,Pis) return Q,P,commoninds(Q,P) end @@ -144,11 +144,11 @@ function svd(A::ITensor, Linds...; kwargs...) A,Lis,Ris = _permute_for_factorize(A,Linds...) - Uis,Ustore,Sis,Sstore,Vis,Vstore = storage_svd(store(A),Lis,Ris;kwargs...) + Lpos,Rpos = decomp_permutation(inds(A),Lis,Ris) + TU,TS,TV = svd(Tensor(A),Lpos,Rpos;kwargs...) 
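+  # The factorization now happens at the Tensor level: decomp_permutation
+  # (defined in this patch's indexset.jl changes) locates Lis and Ris inside
+  # inds(A) -- e.g. for inds(A) == (i,j,k), Lis == (i,j), Ris == (k,) it
+  # should give Lpos == (1,2), Rpos == (3,) -- and the positional Tensor svd
+  # then does the permute/reshape/factorize work.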
+ #Uis,Ustore,Sis,Sstore,Vis,Vstore = storage_svd(store(A),Lis,Ris;kwargs...) - U = ITensor(Uis,Ustore) - S = ITensor(Sis,Sstore) - V = ITensor(Vis,Vstore) + U,S,V = ITensor(TU),ITensor(TS),ITensor(TV) u = commonindex(U,S) v = commonindex(S,V) return U,S,V,u,v diff --git a/src/indexset.jl b/src/indexset.jl index 11f72f0b93..09913d0b87 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -22,17 +22,14 @@ export IndexSet, struct IndexSet{N} inds::MVector{N,Index} - IndexSet(inds::MVector{N,Index}) where {N} = new{N}(inds) - IndexSet(inds::NTuple{N,Index}) where {N} = new{N}(inds) + IndexSet{N}(inds::MVector{N,Index}) where {N} = new{N}(inds) + IndexSet{N}(inds::NTuple{N,Index}) where {N} = new{N}(inds) end +IndexSet(inds::MVector{N,Index}) where {N} = IndexSet{N}(inds) +IndexSet(inds::NTuple{N,Index}) where {N} = IndexSet{N}(inds) inds(is::IndexSet) = is.inds -function permute(is::IndexSet{N},perm) where {N} - indsp = ntuple(i->is[perm[i]], Val(N)) - return IndexSet(indsp) -end - # Empty constructor IndexSet() = IndexSet(()) @@ -59,11 +56,11 @@ function Base.show(io::IO, is::IndexSet) end end -getindex(is::IndexSet,n::Integer) = getindex(is.inds,n) -setindex!(is::IndexSet,i::Index,n::Integer) = setindex!(is.inds,i,n) -length(is::IndexSet) = length(is.inds) +Base.getindex(is::IndexSet,n::Integer) = getindex(is.inds,n) +Base.setindex!(is::IndexSet,i::Index,n::Integer) = setindex!(is.inds,i,n) +Base.length(is::IndexSet) = length(is.inds) order(is::IndexSet) = length(is) -copy(is::IndexSet) = IndexSet(copy(is.inds)) +Base.copy(is::IndexSet) = IndexSet(copy(is.inds)) dims(is::IndexSet{N}) where {N} = ntuple(i->dim(is[i]),Val(N)) dim(is::IndexSet) = prod(dim.(is)) dim(is::IndexSet,pos::Integer) = dim(is[pos]) @@ -83,8 +80,13 @@ dag(is::IndexSet) = IndexSet(dag.(is.inds)) # Allow iteration Base.iterate(is::IndexSet{N},state::Int=1) where {N} = state > N ? nothing : (is[state], state+1) +Base.eltype(is::IndexSet) = Index + +# Needed for findfirst (I think) +Base.keys(is::IndexSet{N}) where {N} = 1:N + #push!(is::IndexSet,i::Index) = push!(is.inds,i) -StaticArrays.push(is::IndexSet,i::Index) = push(is.inds,i) +StaticArrays.push(is::IndexSet{N},i::Index) where {N} = IndexSet{N+1}(push(is.inds,i)) """ minDim(is::IndexSet) @@ -187,7 +189,7 @@ function uniqueinds(Ainds,Binds) Ais = IndexSet(Ainds) Cis = IndexSet() for j ∈ Ais - _is_unique_index(j,Binds) && push!(Cis,j) + _is_unique_index(j,Binds) && (Cis = push(Cis,j)) end return Cis end @@ -223,7 +225,7 @@ function commoninds(Ainds,Binds) Ais = IndexSet(Ainds) Cis = IndexSet() for i ∈ Ais - hasindex(Binds,i) && push!(Cis,i) + hasindex(Binds,i) && (Cis = push(Cis,i)) end return Cis end @@ -257,7 +259,7 @@ function findinds(inds,tags) found_inds = IndexSet() for i ∈ is if hastags(i,ts) - push!(found_inds,i) + found_inds = push(found_inds,i) end end return found_inds @@ -277,13 +279,16 @@ function findindex(inds,tags) return i end end + # TODO: should this return `nothing` if no Index is found? return Index() end # This version checks if there are more than one indices #findindex(inds, tags) = Index(findinds(inds,tags)) -function findindex(is::IndexSet, - i::Index)::Int +# TODO: Should this return `nothing` like `findfirst`? +# Should this just use `findfirst`? 
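+# NOTE: the `::Int` return annotation below means this can never actually
+# return `nothing`, yet the callers in combiner.jl now test `=== nothing`.
+# A findfirst-based sketch (a possible fix, not part of this patch) that
+# would make those checks meaningful:
+#   indexposition(is::IndexSet,i::Index) = findfirst(==(i),is)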
+function indexposition(is::IndexSet, + i::Index)::Int for (n,j) in enumerate(is) if i==j return n @@ -433,6 +438,8 @@ function swaptags!(is::IndexSet, match = nothing) ts1 = TagSet(tags1) ts2 = TagSet(tags2) + # TODO: add debug check that this "random" tag + # doesn't clash with ts1 or ts2 tstemp = TagSet("e43efds") plev(ts1) ≥ 0 && (tstemp = setprime(tstemp,431534)) replacetags!(is, ts1, tstemp, match) @@ -442,22 +449,9 @@ function swaptags!(is::IndexSet, end swaptags(is, vargs...) = swaptags!(copy(is), vargs...) -function getperm(set1, set2) - l1 = length(set1) - l2 = length(set2) - l1==l2 || throw(DimensionMismatch("Mismatched input sizes in calcPerm: l1=$l1, l2=$l2")) - p = zeros(Int,l1) - for i1 = 1:l1 - for i2 = 1:l2 - if set1[i1]==set2[i2] - p[i1] = i2 - break - end - end #i2 - p[i1]!=0 || error("Sets aren't permutations of each other") - end #i1 - return p -end +# +# Helper functions for contracting ITensors +# function compute_contraction_labels(Ai,Bi) rA = length(Ai) @@ -513,6 +507,10 @@ function contract_inds(Ais::IndexSet, return (IndexSet(Cis...),Cind) end +# TODO: implement this in terms of a tuple, +# overload Base.strides and implement strides(inds,j) +# to get the jth stride +# TODO: should the IndexSet store the strides? function compute_strides(inds::IndexSet) r = order(inds) stride = zeros(Int, r) @@ -524,3 +522,68 @@ function compute_strides(inds::IndexSet) return stride end +# +# More general set functions +# + +function permute(is::IndexSet{N},perm) where {N} + indsp = ntuple(i->is[perm[i]], Val(N)) + return IndexSet(indsp) +end + +""" +getperm(col1,col2) + +Get the permutation that takes collection 2 to collection 1, +such that col2[p].==col1 +""" +function getperm(set1, set2) + l1 = length(set1) + l2 = length(set2) + l1==l2 || throw(DimensionMismatch("Mismatched input sizes in calcPerm: l1=$l1, l2=$l2")) + p = zeros(Int,l1) + for i1 = 1:l1 + for i2 = 1:l2 + if set1[i1]==set2[i2] + p[i1] = i2 + break + end + end #i2 + p[i1]!=0 || error("Sets aren't permutations of each other") + end #i1 + return p +end + +""" +Determine if s1 and s2 have no overlapping elements. +""" +function isdisjoint(s1,s2) + for i1 ∈ 1:length(s1) + for i2 ∈ 1:length(s2) + s1[i1] == s2[i2] && return false + end + end + return true +end + +""" +Determine if P is a trivial permutation. Errors if P is not a valid +permutation. +""" +function is_trivial_permutation(P) + isperm(P) || error("Input is not a permutation") + N = length(P) + for n = 1:N + P[n]!=n && return false + end + return true +end + +function decomp_permutation(s::IndexSet{N},s1::IndexSet{N1},s2::IndexSet{N2}) where {N1,N2,N} + N1+N2≠N && error("Size of partial sets don't match with total set") + perm1 = ntuple(i->findfirst(==(s1[i]),s),Val(N1)) + perm2 = ntuple(i->findfirst(==(s2[i]),s),Val(N2)) + isperm((perm1...,perm2...)) || error("Combined permutations are $((perm1...,perm2...)), not a valid permutation") + return perm1,perm2 +end + diff --git a/src/itensor.jl b/src/itensor.jl index a6f906ecd0..ec76c30766 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -20,20 +20,27 @@ export ITensor, mutable struct ITensor{N} - inds::IndexSet{N} store::TensorStorage + inds::IndexSet{N} #TODO: check that the storage is consistent with the #total dimension of the indices (possibly only in debug mode); - ITensor(is::IndexSet{N},st) where {N} = new{N}(is,st) + ITensor(st,is::IndexSet{N}) where {N} = new{N}(st,is) end +inds(T::ITensor) = T.inds +store(T::ITensor) = T.store + +# TODO: do we need these? 
I think yes, for add!(::ITensor,::ITensor) +setinds!(T::ITensor,is...) = (T.inds = IndexSet(is...)) +setstore!(T::ITensor,st::TensorStorage) = (T.store = st) + # # Dense ITensor constructors # -ITensor(T::Tensor) = ITensor(inds(T),store(T)) +ITensor(T::Tensor) = ITensor(store(T),inds(T)) -ITensor() = ITensor(IndexSet(),Dense{Nothing}()) +ITensor() = ITensor(Dense{Nothing}(),IndexSet()) ITensor(is::IndexSet) = ITensor(Float64,is...) ITensor(inds::Index...) = ITensor(IndexSet(inds...)) @@ -41,25 +48,25 @@ Tensor(A::ITensor) = Tensor(store(A),inds(A)) function ITensor(::Type{T}, inds::IndexSet) where {T<:Number} - return ITensor(inds,Dense{float(T)}(zero(float(T)),dim(inds))) + return ITensor(Dense{float(T)}(zeros(float(T),dim(inds))),inds) end ITensor(::Type{T},inds::Index...) where {T<:Number} = ITensor(T,IndexSet(inds...)) function ITensor(::UndefInitializer, inds::IndexSet) - return ITensor(inds,Dense{Float64}(Vector{Float64}(undef,dim(inds)))) + return ITensor(Dense{Float64}(Vector{Float64}(undef,dim(inds))),inds) end ITensor(x::UndefInitializer,inds::Index...) = ITensor(x,IndexSet(inds...)) function ITensor(x::S,inds::IndexSet) where {S<:Number} - return ITensor(inds,Dense{float(S)}(float(x),dim(inds))) + return ITensor(Dense{float(S)}(fill(float(x),dim(inds))),inds) end ITensor(x::S,inds::Index...) where {S<:Number} = ITensor(x,IndexSet(inds...)) #TODO: check that the size of the Array matches the Index dimensions function ITensor(A::Array{S},inds::IndexSet) where {S<:Number} length(A) ≠ dim(inds) && throw(DimensionMismatch("In ITensor(Array,IndexSet), length of Array ($(length(A))) must match total dimension of IndexSet ($(dim(inds)))")) - return ITensor(inds,Dense{float(S)}(float(vec(A)))) + return ITensor(Dense{float(S)}(float(vec(A))),inds) end ITensor(A::Array{S},inds::Index...) where {S<:Number} = ITensor(A,IndexSet(inds...)) @@ -76,7 +83,7 @@ The storage will have Diag type. """ function diagITensor(::Type{T}, is::IndexSet) where {T<:Number} - return ITensor(is,Diag{Vector{T}}(zero(T),minDim(is))) + return ITensor(Diag{Vector{T}}(zero(T),minDim(is)),is) end """ @@ -99,7 +106,7 @@ The storage will have Diag type. function diagITensor(v::Vector{T}, is::IndexSet) where {T<:Number} length(v) ≠ minDim(is) && error("Length of vector for diagonal must equal minimum of the dimension of the input indices") - return ITensor(is,Diag{Vector{float(T)}}(v)) + return ITensor(Diag{Vector{float(T)}}(v),is) end """ @@ -122,7 +129,7 @@ Make a sparse ITensor of element type Float64 with non-zero elements only along the diagonal. Defaults to storing zeros along the diagonal. The storage will have Diag type. """ -diagITensor(is::IndexSet) = ITensor(is,Diag{Vector{Float64}}(zero(Float64),minDim(is))) +diagITensor(is::IndexSet) = ITensor(Diag{Vector{Float64}}(zero(Float64),minDim(is)),is) """ diagITensor(is::Index...) @@ -143,7 +150,7 @@ The storage will have Diag type. """ function diagITensor(x::T, is::IndexSet) where {T<:Number} - return ITensor(is,Diag{Vector{float(T)}}(float(x),minDim(is))) + return ITensor(Diag{Vector{float(T)}}(float(x),minDim(is)),is) end """ @@ -165,7 +172,7 @@ end Make a diagonal ITensor with all diagonal elements 1. """ function delta(::Type{T},is::IndexSet) where {T} - return ITensor(is,Diag{float(T)}(one(T))) + return ITensor(Diag{float(T)}(one(T)),is) end """ @@ -199,9 +206,6 @@ Convert to the complex version of the storage. 
""" Base.complex(T::ITensor) = ITensor(complex(Tensor(store(T),inds(T)))) -inds(T::ITensor) = T.inds -store(T::ITensor) = T.store - # This constructor allows many IndexSet # set operations to work with ITensors IndexSet(T::ITensor) = inds(T) @@ -306,31 +310,37 @@ function replaceindex!(A::ITensor,i::Index,j::Index) return A end -prime(A::ITensor,vargs...)= ITensor(prime(inds(A),vargs...),store(A)) +# TODO: can we turn these into a macro? +# for f ∈ (prime,setprime,noprime,...) +# f(A::ITensor,vargs...) = ITensor(store(A),f(inds(A),vargs...)) +# f!(A::ITensor,vargs...) = ( f!(inds(A),vargs...); return A ) +# end + +prime(A::ITensor,vargs...)= ITensor(store(A),prime(inds(A),vargs...)) + prime!(A::ITensor,vargs...)= ( prime!(inds(A),vargs...); return A ) adjoint(A::ITensor) = prime(A) -setprime(A::ITensor,vargs...) = ITensor(setprime(inds(A),vargs...),store(A)) - -noprime(A::ITensor,vargs...) = ITensor(noprime(inds(A),vargs...),store(A)) +setprime(A::ITensor,vargs...) = ITensor(store(A),setprime(inds(A),vargs...)) -mapprime(A::ITensor,vargs...) = ITensor(mapprime(inds(A),vargs...),store(A)) +noprime(A::ITensor,vargs...) = ITensor(store(A),noprime(inds(A),vargs...)) -swapprime(A::ITensor,vargs...) = ITensor(swapprime(inds(A),vargs...),store(A)) +mapprime(A::ITensor,vargs...) = ITensor(store(A),mapprime(inds(A),vargs...)) +swapprime(A::ITensor,vargs...) = ITensor(store(A),swapprime(inds(A),vargs...)) -addtags(A::ITensor,vargs...) = ITensor(addtags(inds(A),vargs...),store(A)) +addtags(A::ITensor,vargs...) = ITensor(store(A),addtags(inds(A),vargs...)) -removetags(A::ITensor,vargs...) = ITensor(removetags(inds(A),vargs...),store(A)) +removetags(A::ITensor,vargs...) = ITensor(store(A),removetags(inds(A),vargs...)) -replacetags(A::ITensor,vargs...) = ITensor(replacetags(inds(A),vargs...),store(A)) +replacetags(A::ITensor,vargs...) = ITensor(store(A),replacetags(inds(A),vargs...)) -replacetags!(A::ITensor,vargs...) = replacetags!(A.inds,vargs...) +replacetags!(A::ITensor,vargs...) = ( replacetags!(inds(A),vargs...); return A ) -settags(A::ITensor,vargs...) = ITensor(settags(inds(A),vargs...),store(A)) +settags(A::ITensor,vargs...) = ITensor(store(A),settags(inds(A),vargs...)) -swaptags(A::ITensor,vargs...) = ITensor(swaptags(inds(A),vargs...),store(A)) +swaptags(A::ITensor,vargs...) = ITensor(store(A),swaptags(inds(A),vargs...)) # TODO: implement in a better way (more generically for other storage) function Base.:(==)(A::ITensor,B::ITensor) @@ -384,16 +394,13 @@ LinearAlgebra.norm(T::ITensor) = norm(Tensor(T)) function dag(T::ITensor) TT = conj(Tensor(T)) - return ITensor(dag(inds(T)),store(TT)) + return ITensor(store(TT),dag(inds(T))) end function permute(T::ITensor,new_inds) - #permTis = IndexSet(permTinds) - #permTstore = typeof(store(T))(dim(T)) - #storage_permute!(permTstore,permTis,store(T),inds(T)) - perm = getperm(inds(T),new_inds) - Tp = permutedims(Tensor(store(T),inds(T)),perm) - return ITensor(Tp) + perm = getperm(new_inds,inds(T)) + TT = permutedims(Tensor(store(T),inds(T)),perm) + return ITensor(TT) end permute(T::ITensor,inds::Index...) 
= permute(T,IndexSet(inds...)) @@ -407,12 +414,12 @@ Base.:/(A::ITensor,x::Number) = A*(1.0/x) Base.:-(A::ITensor) = ITensor(-Tensor(A)) function Base.:+(A::ITensor,B::ITensor) C = copy(A) - add!(C,B) + C = add!(C,B) return C end function Base.:-(A::ITensor,B::ITensor) C = copy(A) - add!(C,-1,B) + C = add!(C,-1,B) return C end @@ -456,8 +463,8 @@ B .= A """ function Base.copyto!(R::ITensor{N},T::ITensor{N}) where {N} perm = getperm(inds(R),inds(T)) - permutedims!(Tensor(R),Tensor(T),perm) - return R + TR = permutedims!(Tensor(R),Tensor(T),perm) + return ITensor(TR) end """ @@ -470,15 +477,16 @@ B .+= A B .+= α .* A ``` """ -add!(T1::ITensor,T2::ITensor) = add!(T1,1,T2) +add!(R::ITensor,T::ITensor) = add!(R,1,T) -function add!(T1::ITensor,α2::Number,T2::ITensor) - # TODO: is replacing the storage entirely the best way - # to do this logic? Is this worse in the case that - # the storage type stays the same? - perm = getperm(inds(T1),inds(T2)) - permutedims!(Tensor(T1),Tensor(T2),perm,(t1,t2)->t1+α2*t2) - return T1 +function add!(R::ITensor{N},α::Number,T::ITensor{N}) where {N} + perm = getperm(inds(R),inds(T)) + TR,TT = Tensor(R),Tensor(T) + TR = convert(promote_type(typeof(TR),typeof(TT)),TR) + TR = permutedims!(TR,TT,perm,(r,t)->r+α*t) + setstore!(R,store(TR)) + setinds!(R,inds(TR)) + return R end """ @@ -489,10 +497,10 @@ Add ITensors α*A and β*B and store the result in A. A .= α .* A .+ β .* B ``` """ -function add!(R::ITensor,αr::Number,αt::Number,T::ITensor) +function add!(R::ITensor{N},αr::Number,αt::Number,T::ITensor{N}) where {N} perm = getperm(inds(R),inds(T)) - permutedims!(Tensor(R),Tensor(T),perm,(r,t)->αr*r+αt*t) - return R + TR = permutedims!(Tensor(R),Tensor(T),perm,(r,t)->αr*r+αt*t) + return ITensor(TR) end """ @@ -565,8 +573,7 @@ end function multSiteOps(A::ITensor, B::ITensor) - R = copy(A) - prime!(R,"Site") + R = prime(A,"Site") R *= B return mapprime(R,2,1) end diff --git a/src/sets.jl b/src/sets.jl deleted file mode 100644 index 842375c596..0000000000 --- a/src/sets.jl +++ /dev/null @@ -1,10 +0,0 @@ - -function isdisjoint(s1,s2) - for i1 ∈ 1:length(s1) - for i2 ∈ 1:length(s2) - s1[i1] == s2[i2] && return false - end - end - return true -end - diff --git a/src/tensor/combiner.jl b/src/tensor/combiner.jl index 6a929c60dc..f001e105c7 100644 --- a/src/tensor/combiner.jl +++ b/src/tensor/combiner.jl @@ -10,7 +10,7 @@ function storage_contract(CSstore::CombinerStorage, dis::IndexSet) cind = Cis[1] if hasindex(dis, cind) # has combined index, uncombine - cpos = findindex(dis,cind) + cpos = indexposition(dis,cind) dinds = inds(dis) Cinds = inds(Cis) Nis = IndexSet(vcat(dinds[1:cpos-1], Cinds[2:end], dinds[cpos+1:end])) @@ -18,8 +18,8 @@ function storage_contract(CSstore::CombinerStorage, else # lacks combined index, combine # dis doesn't have cind, replace # Cis[1], Cis[2], ... 
with cind, may need to permute - j_1_pos = findindex(dis,Cis[2]) - if j_1_pos < 1 + j_1_pos = indexposition(dis,Cis[2]) + if j_1_pos === nothing throw(ArgumentError("tensor missing index $(Cis[2]) in combiner-tensor product")) end @@ -46,8 +46,8 @@ function storage_contract(CSstore::CombinerStorage, # permute combined indices to the front, in same order as in Cis: ni = 1 for c in 2:length(Cis) - j = findindex(dis, Cis[c]) - if j < 1 + j = indexposition(dis, Cis[c]) + if j === nothing throw(ArgumentError("tensor missing index $(Cis[c]) in combiner-tensor product")) end P[j] = ni diff --git a/src/tensor/contract.jl b/src/tensor/contraction_logic.jl similarity index 82% rename from src/tensor/contract.jl rename to src/tensor/contraction_logic.jl index a2fa261c2f..130dce0429 100644 --- a/src/tensor/contract.jl +++ b/src/tensor/contraction_logic.jl @@ -16,7 +16,7 @@ # return true #end -mutable struct CProps +mutable struct ContractionProperties ai::Vector{Int} bi::Vector{Int} ci::Vector{Int} @@ -44,16 +44,16 @@ mutable struct CProps newArange::Vector{Int} newBrange::Vector{Int} newCrange::Vector{Int} - function CProps(ai::Vector{Int}, - bi::Vector{Int}, - ci::Vector{Int}) + function ContractionProperties(ai::Vector{Int}, + bi::Vector{Int}, + ci::Vector{Int}) new(ai,bi,ci,0,0,0,Vector{Int}(),Vector{Int}(),Vector{Int}(),false,false,false,1,1,1,0, length(ai),length(bi),length(ai),length(bi),Vector{Int}(),Vector{Int}(),Vector{Int}(), false,Vector{Int}(),Vector{Int}(),Vector{Int}()) end end -function compute_perms!(props::CProps)::Nothing +function compute_perms!(props::ContractionProperties)::Nothing #Use !AtoB.empty() as a check to see if we've already run this length(props.AtoB)!=0 && return @@ -101,14 +101,14 @@ function compute_perms!(props::CProps)::Nothing end -function is_trivial_permutation(P::Vector{Int})::Bool - for n = 1:length(P) - P[n]!=n && return false - end - return true -end +#function is_trivial_permutation(P::Vector{Int})::Bool +# for n = 1:length(P) +# P[n]!=n && return false +# end +# return true +#end -function checkACsameord(props::CProps)::Bool +function checkACsameord(props::ContractionProperties)::Bool props.Austart>=length(props.ai) && return true aCind = props.AtoC[props.Austart] for i = 1:length(props.ai) @@ -120,7 +120,7 @@ function checkACsameord(props::CProps)::Bool return true end -function checkBCsameord(props::CProps)::Bool +function checkBCsameord(props::ContractionProperties)::Bool props.Bustart>=length(props.bi) && return true bCind = props.BtoC[props.Bustart] for i = 1:length(props.bi) @@ -132,35 +132,32 @@ function checkBCsameord(props::CProps)::Bool return true end -contractedA(props::CProps,i::Int) = (props.AtoC[i]<1) -contractedB(props::CProps,i::Int) = (props.BtoC[i]<1) -Atrans(props::CProps) = contractedA(props,1) -Btrans(props::CProps) = !contractedB(props,1) -Ctrans(props::CProps) = props.ctrans +contractedA(props::ContractionProperties,i::Int) = (props.AtoC[i]<1) +contractedB(props::ContractionProperties,i::Int) = (props.BtoC[i]<1) +Atrans(props::ContractionProperties) = contractedA(props,1) +Btrans(props::ContractionProperties) = !contractedB(props,1) +Ctrans(props::ContractionProperties) = props.ctrans # TODO: replace find_index(v,t) with built in Julia function: # findfirst(==(t), v) -function find_index(v::Vector{Int},t)::Int - for i = 1:length(v) - v[i]==t && return i - end - return -1 -end +#function find_index(v::Vector{Int},t)::Int +# for i = 1:length(v) +# v[i]==t && return i +# end +# return -1 +#end -function 
permute_extents(R::Vector{Int},P::Vector{Int})::Vector{Int} - Rb = fill(0,length(R)) - n = 1 - for pn in P - Rb[pn] = R[n] - n += 1 - end - return Rb -end +#function permute_extents(R::Vector{Int},P::Vector{Int})::Vector{Int} +# Rb = fill(0,length(R)) +# n = 1 +# for pn in P +# Rb[pn] = R[n] +# n += 1 +# end +# return Rb +#end -function compute!(props::CProps, - A, - B, - C) +function compute_contraction_properties!(props::ContractionProperties, A, B, C) compute_perms!(props) #Use props.PC.size() as a check to see if we've already run this @@ -276,7 +273,8 @@ function compute!(props::CProps, bind = props.Bcstart for i = 1:props.ncont while !contractedB(props,bind) bind += 1 end - j = find_index(props.ai,props.bi[bind]) + #j = find_index(props.ai,props.bi[bind]) + j = findfirst(==(props.bi[bind]),props.ai) newi += 1 props.PA[newi] = j bind += 1 @@ -287,7 +285,8 @@ function compute!(props::CProps, #appear in same order as on C #TODO: check this is correct for 1-indexing for k = 1:rc - j = find_index(props.ai,props.ci[k]) + #j = find_index(props.ai,props.ci[k]) + j = findfirst(==(props.ci[k]),props.ai) if j!=-1 props.AtoC[newi] = k props.PA[newi+1] = j @@ -307,7 +306,8 @@ function compute!(props::CProps, else props.Austart = min(i,props.Austart) end - props.newArange = permute_extents([size(A)...],props.PA) + #props.newArange = permute_extents([size(A)...],props.PA) + props.newArange = [size(A)...][props.PA] end if(props.permuteB) @@ -335,7 +335,8 @@ function compute!(props::CProps, aind = props.Acstart for i = 0:(props.ncont-1) while !contractedA(props,aind) aind += 1 end - j = find_index(props.bi,props.ai[aind]) + #j = find_index(props.bi,props.ai[aind]) + j = findfirst(==(props.ai[aind]),props.bi) newi += 1 props.PB[newi] = j aind += 1 @@ -346,7 +347,8 @@ function compute!(props::CProps, #Permute uncontracted indices to #appear in same order as on C for k = 1:rc - j = find_index(props.bi,props.ci[k]) + #j = find_index(props.bi,props.ci[k]) + j = findfirst(==(props.ci[k]),props.bi) if j!=-1 props.BtoC[newi] = k props.PB[newi+1] = j @@ -363,7 +365,8 @@ function compute!(props::CProps, props.Bustart = min(i,props.Bustart) end end - props.newBrange = permute_extents([size(B)...],props.PB) + #props.newBrange = permute_extents([size(B)...],props.PB) + props.newBrange = [size(B)...][props.PB] end if props.permuteA || props.permuteB @@ -496,27 +499,27 @@ end #end #TODO: this should be optimized -function _contract_scalar!(Cdata::Array,Clabels::Vector{Int}, - Bdata::Array,Blabels::Vector{Int},α,β) - p = calculate_permutation(Blabels,Clabels) - if β==0 - if is_trivial_permutation(p) - Cdata .= α.*Bdata - else - #TODO: make an optimized permutedims!() that also scales the data - permutedims!(Cdata,α*Bdata) - end - else - if is_trivial_permutation(p) - Cdata .= α.*Bdata .+ β.*Cdata - else - #TODO: make an optimized permutedims!() that also adds and scales the data - permBdata = permutedims(Bdata,p) - Cdata .= α.*permBdata .+ β.*Cdata - end - end - return -end +#function _contract_scalar!(Cdata::Array,Clabels::Vector{Int}, +# Bdata::Array,Blabels::Vector{Int},α,β) +# p = calculate_permutation(Blabels,Clabels) +# if β==0 +# if is_trivial_permutation(p) +# Cdata .= α.*Bdata +# else +# #TODO: make an optimized permutedims!() that also scales the data +# permutedims!(Cdata,α*Bdata) +# end +# else +# if is_trivial_permutation(p) +# Cdata .= α.*Bdata .+ β.*Cdata +# else +# #TODO: make an optimized permutedims!() that also adds and scales the data +# permBdata = permutedims(Bdata,p) +# Cdata .= α.*permBdata 
.+ β.*Cdata +# end +# end +# return +#end #function contract!(Cdata::Array{T},Clabels::Vector{Int}, # Adata::Array{T},Alabels::Vector{Int}, diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index d73d103c4b..0f7519cf7c 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -3,10 +3,10 @@ export Dense struct Dense{T} <: TensorStorage data::Vector{T} Dense{T}(data::Vector) where {T} = new{T}(convert(Vector{T},data)) - Dense(data::Vector{T}) where {T} = new{T}(data) - Dense{T}(size::Integer) where {T} = new{T}(Vector{T}(undef,size)) - Dense{T}(x::Number,size::Integer) where {T} = new{T}(fill(T(x),size)) - Dense(x::T,size::Integer) where {T<:Number} = new{T}(fill(T,size)) + #Dense(data::Vector{T}) where {T} = new{T}(data) + #Dense{T}(size::Integer) where {T} = new{T}(Vector{T}(undef,size)) + #Dense{T}(x::Number,size::Integer) where {T} = new{T}(fill(T(x),size)) + #Dense(x::T,size::Integer) where {T<:Number} = new{T}(fill(T,size)) Dense{T}() where {T} = new{T}(Vector{T}()) end @@ -16,38 +16,43 @@ data(D::Dense) = D.data Base.getindex(D::Dense,i::Integer) = data(D)[i] Base.setindex!(D::Dense,v,i::Integer) = (data(D)[i] = v) -Base.similar(D::Dense{T}) where {T} = Dense{T}(length(D)) -Base.similar(D::Dense{T},dims) where {T} = Dense{T}(dim(dims)) -Base.similar(::Type{Dense{T}},dims) where {T} = Dense{T}(dim(dims)) -Base.similar(D::Dense{T},::Type{S}) where {T,S} = Dense{S}(length(D)) +Base.similar(D::Dense{T}) where {T} = Dense{T}(similar(data(D))) +Base.similar(D::Dense{T},dims) where {T} = Dense{T}(similar(data(D),dim(dims))) +Base.similar(::Type{Dense{T}},dims) where {T} = Dense{T}(similar(Vector{T},dim(dims))) +Base.similar(D::Dense,::Type{T}) where {T} = Dense{T}(similar(data(D),T)) Base.copy(D::Dense{T}) where {T} = Dense{T}(copy(data(D))) -Base.eltype(D::Dense{T}) where {T} = eltype(T) +Base.eltype(::Dense{T}) where {T} = eltype(T) # This is necessary since for some reason inference doesn't work # with the more general definition (eltype(Nothing) === Any) -Base.eltype(D::Dense{Nothing}) = Nothing +Base.eltype(::Dense{Nothing}) = Nothing Base.eltype(::Type{Dense{T}}) where {T} = eltype(T) #Base.length(D::Dense) = length(data(D)) +Base.promote_rule(::Type{Dense{T1}},::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} +Base.convert(::Type{Dense{R}},D::Dense) where {R} = Dense{R}(convert(Vector{R},data(D))) + +const DenseTensor{El,N,Inds} = Tensor{El,N,<:Dense,Inds} + # Basic functionality for AbstractArray interface -Base.IndexStyle(::Type{T}) where {T<:Tensor{ElT,N,StoreT}} where {ElT,N,StoreT<:Dense} = IndexLinear() -Base.getindex(T::Tensor{ElT,N,StoreT},i::Integer) where {ElT,N,StoreT<:Dense} = store(T)[i] -Base.setindex!(T::Tensor{ElT,N,StoreT},v,i::Integer) where {ElT,N,StoreT<:Dense} = (store(T)[i] = v) +Base.IndexStyle(::Type{TensorT}) where {TensorT<:DenseTensor} = IndexLinear() +Base.getindex(T::DenseTensor,i::Integer) = store(T)[i] +Base.setindex!(T::DenseTensor,v,i::Integer) = (store(T)[i] = v) # Create an Array that is a view of the Dense Tensor # Useful for using Base Array functions -Base.Array(T::Tensor) = reshape(data(store(T)),dims(inds(T))) -Base.Vector(T::Tensor) = vec(Array(T)) +Base.Array(T::DenseTensor) = reshape(data(store(T)),dims(inds(T))) +Base.Vector(T::DenseTensor) = vec(Array(T)) -function Base.permutedims!(T1::Tensor{ElT1,N,StoreT1}, - T2::Tensor{ElT2,N,StoreT2}, - perm) where {ElT1,ElT2,N,StoreT1<:Dense,StoreT2<:Dense} +function Base.permutedims!(T1::DenseTensor{<:Number,N}, + T2::DenseTensor{<:Number,N}, + perm) where {N} 
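+  # Both tensors wrap flat Dense storage; Array(T) (defined above) reshapes
+  # that storage into an N-dimensional view sharing the same data, so Base's
+  # permutedims! does the actual work in place on T1.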
permutedims!(Array(T1),Array(T2),perm) return T1 end -function Base.permutedims(T::Tensor{ElT,N,StoreT}, - perm) where {ElT,N,StoreT<:Dense} +function Base.permutedims(T::DenseTensor, + perm) Tp = similar(T,permute(inds(T),perm)) permutedims!(Tp,T,perm) return Tp @@ -65,10 +70,10 @@ using Base.Cartesian: @nexprs, # Based off of the permutedims! implementation in Julia's base: # https://github.com/JuliaLang/julia/blob/91151ab871c7e7d6689d1cfa793c12062d37d6b6/base/multidimensional.jl#L1355 # -@generated function Base.permutedims!(TP::Tensor{ElTP,N,<:Dense}, - T::Tensor{ElT,N,<:Dense}, +@generated function Base.permutedims!(TP::DenseTensor{<:Number,N}, + T::DenseTensor{<:Number,N}, perm, - f::Function) where {ElTP,ElT,N} + f::Function) where {N} quote Base.checkdims_perm(TP, T, perm) @@ -95,7 +100,7 @@ using Base.Cartesian: @nexprs, end end -function outer(T1::Tensor,T2::Tensor) +function outer(T1::DenseTensor,T2::DenseTensor) return Tensor(Dense(vec(Vector(T1)*transpose(Vector(T2)))),union(inds(T1),inds(T2))) end const ⊗ = outer @@ -241,17 +246,17 @@ const ⊗ = outer # throw(ErrorException("Cannot convert Dense -> Number for length of data greater than 1")) #end -function contract(T1::Tensor{ElT1,N1,<:Dense},labelsT1, - T2::Tensor{ElT2,N2,<:Dense},labelsT2) where {ElT1,ElT2,N1,N2} +function contract(T1::DenseTensor,labelsT1, + T2::DenseTensor,labelsT2) indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) R = similar(promote_type(typeof(T1),typeof(T2)),indsR) contract!(R,labelsR,T1,labelsT1,T2,labelsT2) return R end -function contract!(R::Tensor{ElR,NR,<:Dense},labelsR, - T1::Tensor{ElT1,N1,<:Dense},labelsT1, - T2::Tensor{ElT2,N2,<:Dense},labelsT2) where {ElR,ElT1,ElT2,NR,N1,N2} +function contract!(R::DenseTensor,labelsR, + T1::DenseTensor{<:Number,N1},labelsT1, + T2::DenseTensor{<:Number,N2},labelsT2) where {N1,N2} if N1==0 # TODO: replace with an add! function? # What about doing `R .= T1[] .* PermutedDimsArray(T2,perm)`? 
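For orientation, the label convention threaded through these contract
routines: a dimension shared by T1 and T2 carries the same negative label
in labelsT1 and labelsT2 and is summed over, while positive labels survive
into labelsR. A minimal sketch of driving the Tensor-level entry point
directly (index names hypothetical; assumes the constructors defined in
this patch):

i,j,k = Index(2),Index(3),Index(4)
A = Tensor(Dense{Float64}(randn(dim(i)*dim(j))),IndexSet(i,j))
B = Tensor(Dense{Float64}(randn(dim(j)*dim(k))),IndexSet(j,k))
labelsA,labelsB = compute_contraction_labels(inds(A),inds(B)) # j gets the shared negative label
C = contract(A,labelsA,B,labelsB)                             # C should have inds (i,k)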
@@ -277,17 +282,17 @@ function contract!(R::Tensor{ElR,NR,<:Dense},labelsR, return R end -function _contract!(R::Tensor{ElR,NR,<:Dense},labelsR, - T1::Tensor{ElT1,N1,<:Dense},labelsT1, - T2::Tensor{ElT2,N2,<:Dense},labelsT2) where {ElR,ElT1,ElT2,NR,N1,N2} - props = CProps(labelsT1,labelsT2,labelsR) - compute!(props,T1,T2,R) +function _contract!(R::DenseTensor,labelsR, + T1::DenseTensor,labelsT1, + T2::DenseTensor,labelsT2) + props = ContractionProperties(labelsT1,labelsT2,labelsR) + compute_contraction_properties!(props,T1,T2,R) _contract_dense_dense!(Array(R),props,Array(T1),Array(T2)) return R end function _contract_dense_dense!(C::Array{T}, - p::CProps, + p::ContractionProperties, A::Array{T}, B::Array{T}, α::T=one(T), @@ -345,6 +350,28 @@ function _contract_dense_dense!(C::Array{T}, return end +function _contract_scalar!(Cdata::Array,Clabels::Vector{Int}, + Bdata::Array,Blabels::Vector{Int},α,β) + p = calculate_permutation(Blabels,Clabels) + if β==0 + if is_trivial_permutation(p) + Cdata .= α.*Bdata + else + #TODO: make an optimized permutedims!() that also scales the data + permutedims!(Cdata,α*Bdata) + end + else + if is_trivial_permutation(p) + Cdata .= α.*Bdata .+ β.*Cdata + else + #TODO: make an optimized permutedims!() that also adds and scales the data + permBdata = permutedims(Bdata,p) + Cdata .= α.*permBdata .+ β.*Cdata + end + end + return +end + #function _contract(Cinds::IndexSet, Clabels::Vector{Int}, # Astore::Dense{SA}, Ainds::IndexSet, Alabels::Vector{Int}, # Bstore::Dense{SB}, Binds::IndexSet, Blabels::Vector{Int}) where {SA<:Number,SB<:Number} @@ -371,33 +398,66 @@ end # elseif(length(Blabels)==0) # contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) # else -# props = CProps(Alabels,Blabels,Clabels) -# compute!(props,Adata,Bdata,Cdata) +# props = ContractionProperties(Alabels,Blabels,Clabels) +# compute_contraction_properties!(props,Adata,Bdata,Cdata) # _contract_dense_dense!(Cdata,props,Adata,Bdata) # end # # return Cstore #end -function storage_svd(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet; - kwargs...) where {T} +# Combine a bunch of tuples +@inline tuplejoin(x) = x +@inline tuplejoin(x, y) = (x..., y...) +@inline tuplejoin(x, y, z...) = (x..., tuplejoin(y, z...)...) + +# TODO: write this function +function permutereshape(T::DenseTensor,pos::NTuple{N,NTuple{<:Any,Int}}) where {N} + perm = tuplejoin(pos...) + if !is_trivial_permutation(perm) + T = permutedims(T,perm) + end + @show pos + @show perm + @show prod.(pos) + #newdims = dims(T)[perm] + Tr = reshape(T,prod.(pos)) + return Tr +end + +function LinearAlgebra.svd(T::DenseTensor{<:Number,N}, + Lpos::NTuple{NL,<:Integer}, + Rpos::NTuple{NR,<:Integer}; + kwargs...) where {N,NL,NR} + NL+NR≠N && error("Index positions ($NL and $NR) must add up to order of Tensor ($N)") + M = permutereshape(T,(Lpos,Rpos)) + UM,S,VM = svd(M;kwargs...) + u = ind(UM,2) + v = ind(VM,1) + Uinds = push(inds(T)[Lpos],u) + Vinds = push(inds(T)[Rpos],v) + U = reshape(UM,Uinds) + V = reshape(VM,Vinds) + return U,S,V +end + +function LinearAlgebra.svd(T::DenseTensor{<:Number,2,IndsT}, + kwargs...) 
where {IndsT} maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) mindim::Int = get(kwargs,:mindim,1) cutoff::Float64 = get(kwargs,:cutoff,0.0) absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - utags::String = get(kwargs,:utags,"Link,u") - vtags::String = get(kwargs,:vtags,"Link,v") + #utags::String = get(kwargs,:utags,"Link,u") + #vtags::String = get(kwargs,:vtags,"Link,v") fastSVD::Bool = get(kwargs,:fastSVD,false) if fastSVD - MU,MS,MV = svd(reshape(data(Astore),dim(Lis),dim(Ris))) + MU,MS,MV = svd(Matrix(T)) else - MU,MS,MV = recursiveSVD(reshape(data(Astore),dim(Lis),dim(Ris))) + MU,MS,MV = recursiveSVD(Matrix(T)) end - MV = conj!(MV) + conj!(MV) P = MS.^2 #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff @@ -413,91 +473,94 @@ function storage_svd(Astore::Dense{T}, MV = MV[:,1:dS] end - u = Index(dS,utags) - v = settags(u,vtags) - Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) - #TODO: make a diag storage - Sis,Sstore = IndexSet(u,v),Diag{Vector{Float64}}(MS) - Vis,Vstore = IndexSet(Ris...,v),Dense{T}(Vector{T}(vec(MV))) + # Make the new indices to go onto U and V + u = eltype(IndsT)(dS) + v = eltype(IndsT)(dS) - return (Uis,Ustore,Sis,Sstore,Vis,Vstore) -end + Uinds = push(ind(T,1),u) + Vinds = push(ind(T,2),v) -function storage_eigen(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet; - kwargs...) where {T} - maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) - mindim::Int = get(kwargs,:mindim,1) - cutoff::Float64 = get(kwargs,:cutoff,0.0) - absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) - doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - tags::TagSet = get(kwargs,:lefttags,"Link,u") - lefttags::TagSet = get(kwargs,:lefttags,tags) - righttags::TagSet = get(kwargs,:righttags,prime(lefttags)) - - dim_left = dim(Lis) - dim_right = dim(Ris) - MD,MU = eigen(Hermitian(reshape(data(Astore),dim_left,dim_right))) - - # Sort by largest to smallest eigenvalues - p = sortperm(MD; rev = true) - MD = MD[p] - MU = MU[:,p] + U = Tensor(Dense{T}(vec(MU)),Uinds) + S = Tensor(Diag{Vector{Float64}}(MS),IndsT(u,v)) + V = Tensor(Dense{T}(vec(MV)),Vinds) - #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff - truncate!(MD;maxdim=maxdim, - cutoff=cutoff, - absoluteCutoff=absoluteCutoff, - doRelCutoff=doRelCutoff) - dD = length(MD) - if dD < size(MU,2) - MU = MU[:,1:dD] - end - - #TODO: include truncation parameters as keyword arguments - u = Index(dD,lefttags) - v = settags(u,righttags) - Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) - Dis,Dstore = IndexSet(u,v),Diag{Vector{Float64}}(MD) - return (Uis,Ustore,Dis,Dstore) -end + #u = Index(dS,utags) + #v = settags(u,vtags) + #Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) + #TODO: make a diag storage + #Sis,Sstore = IndexSet(u,v),Diag{Vector{Float64}}(MS) + #Vis,Vstore = IndexSet(Ris...,v),Dense{T}(Vector{T}(vec(MV))) -# TODO: move this to a general "linear_algebra.jl" file? -# i.e. a common place for custom linear algebra functionality -# for matrices -function polar(A::Matrix) - U,S,V = svd(A) # calls LinearAlgebra.svd() - return U*V',V*Diagonal(S)*V' + return U,S,V end -function storage_qr(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet; - kwargs...) 
where {T} - tags::TagSet = get(kwargs,:tags,"Link,u") - dim_left = dim(Lis) - dim_right = dim(Ris) - MQ,MP = qr(reshape(data(Astore),dim_left,dim_right)) - dim_middle = min(dim_left,dim_right) - u = Index(dim_middle,tags) - #Must call Matrix() on MQ since the QR decomposition outputs a sparse - #form of the decomposition - Qis,Qstore = IndexSet(Lis...,u),Dense{T}(vec(Matrix(MQ))) - Pis,Pstore = IndexSet(u,Ris...),Dense{T}(vec(Matrix(MP))) - return (Qis,Qstore,Pis,Pstore) -end +#function storage_eigen(Astore::Dense{T}, +# Lis::IndexSet, +# Ris::IndexSet; +# kwargs...) where {T} +# maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) +# mindim::Int = get(kwargs,:mindim,1) +# cutoff::Float64 = get(kwargs,:cutoff,0.0) +# absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) +# doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) +# tags::TagSet = get(kwargs,:lefttags,"Link,u") +# lefttags::TagSet = get(kwargs,:lefttags,tags) +# righttags::TagSet = get(kwargs,:righttags,prime(lefttags)) +# +# dim_left = dim(Lis) +# dim_right = dim(Ris) +# MD,MU = eigen(Hermitian(reshape(data(Astore),dim_left,dim_right))) +# +# # Sort by largest to smallest eigenvalues +# p = sortperm(MD; rev = true) +# MD = MD[p] +# MU = MU[:,p] +# +# #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff +# truncate!(MD;maxdim=maxdim, +# cutoff=cutoff, +# absoluteCutoff=absoluteCutoff, +# doRelCutoff=doRelCutoff) +# dD = length(MD) +# if dD < size(MU,2) +# MU = MU[:,1:dD] +# end +# +# #TODO: include truncation parameters as keyword arguments +# u = Index(dD,lefttags) +# v = settags(u,righttags) +# Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) +# Dis,Dstore = IndexSet(u,v),Diag{Vector{Float64}}(MD) +# return (Uis,Ustore,Dis,Dstore) +#end -function storage_polar(Astore::Dense{T}, - Lis::IndexSet, - Ris::IndexSet) where {T} - dim_left = dim(Lis) - dim_right = dim(Ris) - MQ,MP = polar(reshape(data(Astore),dim_left,dim_right)) - dim_middle = min(dim_left,dim_right) - Uis = prime(Ris) - Qis,Qstore = IndexSet(Lis...,Uis...),Dense{T}(vec(MQ)) - Pis,Pstore = IndexSet(Uis...,Ris...),Dense{T}(vec(MP)) - return (Qis,Qstore,Pis,Pstore) -end +#function storage_qr(Astore::Dense{T}, +# Lis::IndexSet, +# Ris::IndexSet; +# kwargs...) 
where {T} +# tags::TagSet = get(kwargs,:tags,"Link,u") +# dim_left = dim(Lis) +# dim_right = dim(Ris) +# MQ,MP = qr(reshape(data(Astore),dim_left,dim_right)) +# dim_middle = min(dim_left,dim_right) +# u = Index(dim_middle,tags) +# #Must call Matrix() on MQ since the QR decomposition outputs a sparse +# #form of the decomposition +# Qis,Qstore = IndexSet(Lis...,u),Dense{T}(vec(Matrix(MQ))) +# Pis,Pstore = IndexSet(u,Ris...),Dense{T}(vec(Matrix(MP))) +# return (Qis,Qstore,Pis,Pstore) +#end +# +#function storage_polar(Astore::Dense{T}, +# Lis::IndexSet, +# Ris::IndexSet) where {T} +# dim_left = dim(Lis) +# dim_right = dim(Ris) +# MQ,MP = polar(reshape(data(Astore),dim_left,dim_right)) +# dim_middle = min(dim_left,dim_right) +# Uis = prime(Ris) +# Qis,Qstore = IndexSet(Lis...,Uis...),Dense{T}(vec(MQ)) +# Pis,Pstore = IndexSet(Uis...,Ris...),Dense{T}(vec(MP)) +# return (Qis,Qstore,Pis,Pstore) +#end diff --git a/src/tensor/svd.jl b/src/tensor/svd.jl index 25faa394b1..b170b8b014 100644 --- a/src/tensor/svd.jl +++ b/src/tensor/svd.jl @@ -39,8 +39,8 @@ function pos_sqrt(x::Float64)::Float64 return sqrt(x) end -function checkSVDDone(S::Vector{T}, - thresh::Float64) where {T} +function checkSVDDone(S::Vector, + thresh::Float64) N = length(S) (N <= 1 || thresh < 0.0) && return (true,1) S1t = S[1]*thresh @@ -55,9 +55,9 @@ function checkSVDDone(S::Vector{T}, return (false,start) end -function recursiveSVD(M::AbstractMatrix{T}; +function recursiveSVD(M::AbstractMatrix; thresh::Float64=1E-3, - north_pass::Int=2) where {T} + north_pass::Int=2) Mr,Mc = size(M) if Mr > Mc @@ -97,3 +97,10 @@ function recursiveSVD(M::AbstractMatrix{T}; return U,D,V end + +# TODO: maybe move to another location? +function polar(M::AbstractMatrix) + U,S,V = svd(M) # calls LinearAlgebra.svd() + return U*V',V*Diagonal(S)*V' +end + diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index f68764dd2c..1d303dccbd 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -18,6 +18,7 @@ end store(T::Tensor) = T.store inds(T::Tensor) = T.inds +ind(T::Tensor,j::Integer) = inds(T)[j] # This function is used in the AbstractArray interface #Base.axes(T::Tensor) = Tuple(inds(T)) @@ -41,8 +42,18 @@ end # TODO: implement these versions Base.similar(T::Tensor) = Tensor(similar(store(T)),inds(T)) Base.similar(T::Tensor,dims) = Tensor(similar(store(T),dims),dims) -#Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) -#Base.similar(T::Tensor,::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) +Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) +Base.similar(T::Tensor,::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) + +function Base.convert(::Type{Tensor{ElR,N,StoreR,Inds}}, + T::Tensor{ElT,N,StoreT,Inds}) where {ElR,ElT,N,StoreR,StoreT,Inds} + return Tensor(convert(StoreR,store(T)),inds(T)) +end + +function Base.promote_rule(::Type{Tensor{ElT1,N,StoreT1,Inds}}, + ::Type{Tensor{ElT2,N,StoreT2,Inds}}) where {ElT1,ElT2,N,StoreT1,StoreT2,Inds} + return Tensor{promote_type(ElT1,ElT2),N,promote_type(StoreT1,StoreT2),Inds} +end Base.BroadcastStyle(::Type{T}) where {T<:Tensor} = Broadcast.ArrayStyle{T}() diff --git a/src/tensor/truncate.jl b/src/tensor/truncate.jl new file mode 100644 index 0000000000..4080f7abae --- /dev/null +++ b/src/tensor/truncate.jl @@ -0,0 +1,76 @@ + +function truncate!(P::Vector{Float64}; + kwargs...)::Tuple{Float64,Float64} + maxdim::Int = min(get(kwargs,:maxdim,length(P)), length(P)) + mindim::Int = max(get(kwargs,:mindim,1), 1) + 
cutoff::Float64 = max(get(kwargs,:cutoff,0.0), 0.0) + absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) + doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) + + origm = length(P) + docut = 0.0 + + if P[1]<=0.0 + P[1] = 0.0 + resize!(P,1) + return 0.,0. + end + + if origm==1 + docut = P[1]/2 + return 0.,docut + end + + #Zero out any negative weight + for n=origm:-1:1 + (P[n] >= 0.0) && break + P[n] = 0.0 + end + + n = origm + truncerr = 0.0 + while n > maxdim + truncerr += P[n] + n -= 1 + end + + if absoluteCutoff + #Test if individual prob. weights fall below cutoff + #rather than using *sum* of discarded weights + while P[n] <= cutoff && n > mindim + truncerr += P[n] + n -= 1 + end + else + scale = 1.0 + if doRelCutoff + scale = sum(P) + (scale==0.0) && (scale = 1.0) + end + + #Continue truncating until *sum* of discarded probability + #weight reaches cutoff reached (or m==mindim) + while (truncerr+P[n] <= cutoff*scale) && (n > mindim) + truncerr += P[n] + n -= 1 + end + + truncerr /= scale + end + + if n < 1 + n = 1 + end + + if n < origm + docut = (P[n]+P[n+1])/2 + if abs(P[n]-P[n+1]) < 1E-3*P[n] + docut += 1E-3*P[n] + end + end + + resize!(P,n) + + return truncerr,docut +end + diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl index ca98e638f0..fea22ac9c3 100644 --- a/test/test_itensor_dense.jl +++ b/test/test_itensor_dense.jl @@ -376,7 +376,7 @@ end for ii ∈ 1:dim(i), jj ∈ 1:dim(j), kk ∈ 1:dim(k) @test A[j(jj),k(kk),i(ii)]==digits(SType,ii,jj,kk) end - @test_throws ErrorException A[1] + @test_throws MethodError A[1] end @testset "Test permute(ITensor,Index...)" begin A = randomITensor(SType,i,k,j) @@ -390,20 +390,21 @@ end for ii ∈ 1:dim(i), jj ∈ 1:dim(j), kk ∈ 1:dim(k) @test A[k(kk),i(ii),j(jj)]==permA[i(ii),j(jj),k(kk)] end - # TODO: why do we need this? can't we just splat? - @testset "getindex and setindex with vector of IndexVals" begin - k_inds = [k(kk) for kk ∈ 1:dim(k)] - for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - @test A[k_inds...,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds...] - end - for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - A[k_inds...,i(ii),j(jj)]=collect(1:length(k_inds)) - end - permA = permute(A,k,j,i) - for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - @test A[k_inds...,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds...] - end - end + # TODO: I think this was doing slicing, but what is the output + # of slicing an ITensor? + #@testset "getindex and setindex with vector of IndexVals" begin + # k_inds = [k(kk) for kk ∈ 1:dim(k)] + # for ii ∈ 1:dim(i), jj ∈ 1:dim(j) + # @test A[k_inds,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds...] + # end + # for ii ∈ 1:dim(i), jj ∈ 1:dim(j) + # A[k_inds,i(ii),j(jj)]=collect(1:length(k_inds)) + # end + # permA = permute(A,k,j,i) + # for ii ∈ 1:dim(i), jj ∈ 1:dim(j) + # @test A[k_inds,i(ii),j(jj)]==permA[i(ii),j(jj),k_inds...] 
+ # end + #end end @testset "Set and get values with Ints" begin A = ITensor(SType,i,j,k) @@ -421,10 +422,7 @@ end A = ITensor(x) @test x==scalar(A) A = ITensor(SType,i,j,k) - @test_throws ArgumentError scalar(A) - # test the storage_scalar error throw - ds = Dense{Float64}(rand(10)) - @test_throws ErrorException ITensor.storage_scalar(ds) + @test_throws BoundsError scalar(A) end @testset "Test norm(ITensor)" begin A = randomITensor(SType,i,j,k) @@ -454,13 +452,11 @@ end @testset "Test SVD truncation" begin M = randn(4,4) + randn(4,4)*1.0im - (U,s,Vh) = svd(M) ii = Index(4) jj = Index(4) - S = Diagonal(s) - T = ITensor(IndexSet(ii,jj),Dense{ComplexF64}(vec(U*S*Vh))) - (U,S,Vh) = svd(T,ii;maxdim=2) - @test norm((U*S)*Vh-T)≈sqrt(s[3]^2+s[4]^2) + T = ITensor(Dense{ComplexF64}(vec(M)),IndexSet(ii,jj)) + U,S,V = svd(T,ii;maxdim=2) + @test norm(U*S*V-T)≈sqrt(s[3]^2+s[4]^2) end @testset "Test QR decomposition of an ITensor" begin From 6119d46017eac60e5e20bd9a22e2000e3fd6a0b2 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Wed, 4 Sep 2019 17:49:26 -0400 Subject: [PATCH 05/27] Get SVD working (tests fail because DenseTensor*DiagTensor not defined yet) --- src/decomp.jl | 10 +++- src/indexset.jl | 22 +++++++- src/itensor.jl | 12 ++--- src/tensor/dense.jl | 103 ++++++++++++++++++++++++++++--------- src/tensor/diag.jl | 3 +- src/tensor/tensor.jl | 31 +++++++---- test/test_itensor_dense.jl | 48 ++++++++--------- 7 files changed, 159 insertions(+), 70 deletions(-) diff --git a/src/decomp.jl b/src/decomp.jl index d34cb35374..04fccfbfec 100644 --- a/src/decomp.jl +++ b/src/decomp.jl @@ -143,14 +143,20 @@ arguments provided. The following keyword arguments are recognized: function svd(A::ITensor, Linds...; kwargs...) + utags::TagSet = get(kwargs,:utags,"Link,u") + vtags::TagSet = get(kwargs,:vtags,"Link,v") A,Lis,Ris = _permute_for_factorize(A,Linds...) Lpos,Rpos = decomp_permutation(inds(A),Lis,Ris) TU,TS,TV = svd(Tensor(A),Lpos,Rpos;kwargs...) - #Uis,Ustore,Sis,Sstore,Vis,Vstore = storage_svd(store(A),Lis,Ris;kwargs...) - U,S,V = ITensor(TU),ITensor(TS),ITensor(TV) u = commonindex(U,S) v = commonindex(S,V) + settags!(U,utags,u) + settags!(S,utags,u) + settags!(S,vtags,v) + settags!(V,vtags,v) + u = settags(u,utags) + v = settags(v,vtags) return U,S,V,u,v end diff --git a/src/indexset.jl b/src/indexset.jl index 09913d0b87..2dd29f068d 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -30,6 +30,12 @@ IndexSet(inds::NTuple{N,Index}) where {N} = IndexSet{N}(inds) inds(is::IndexSet) = is.inds +# This is to help with some generic programming in the Tensor +# code (it helps to construct an IndexSet(::NTuple{N,Index}) where the +# only known thing for dispatch is a concrete type such +# as IndexSet{4}) +base_type(::Type{T}) where {T<:IndexSet} = IndexSet + # Empty constructor IndexSet() = IndexSet(()) @@ -80,7 +86,8 @@ dag(is::IndexSet) = IndexSet(dag.(is.inds)) # Allow iteration Base.iterate(is::IndexSet{N},state::Int=1) where {N} = state > N ? nothing : (is[state], state+1) -Base.eltype(is::IndexSet) = Index +Base.eltype(is::Type{<:IndexSet}) = Index +Base.eltype(is::IndexSet) = eltype(typeof(is)) # Needed for findfirst (I think) Base.keys(is::IndexSet{N}) where {N} = 1:N @@ -361,6 +368,8 @@ function swapprime!(is::IndexSet, for n in pos if plev(is[n])==pl1 is[n] = setprime(is[n],pl2) + elseif plev(is[n])==pl2 + is[n] = setprime(is[n],pl1) end end return is @@ -433,6 +442,8 @@ function replacetags!(is::IndexSet, end replacetags(is, vargs...) = replacetags!(copy(is), vargs...) 
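+# The extra `elseif` branch added to swapprime! above makes the swap
+# two-sided: indices at either prime level trade places, instead of only
+# those at pl1 being mapped to pl2. A sketch of the intended behavior
+# (hypothetical indices; usage assumed):
+#   i,j = Index(2,"i"),Index(2,"j")
+#   is = IndexSet(i,prime(j))  # prime levels (0,1)
+#   swapprime!(is,0,1)         # prime levels should now be (1,0)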
+# TODO: write more efficient version in terms
+# of indexpositions like swapprime!
 function swaptags!(is::IndexSet,
                    tags1, tags2,
                    match = nothing)
@@ -531,6 +542,14 @@ function permute(is::IndexSet{N},perm) where {N}
   return IndexSet(indsp)
 end
 
+# Permute some other type by perm
+# (for example, tuple, MVector, etc.)
+# as long as the constructor accepts a tuple
+function permute(is::T,perm) where {T}
+  indsp = ntuple(i->is[perm[i]], Val(length(is)))
+  return T(indsp)
+end
+
 """
 getperm(col1,col2)
 
@@ -572,6 +591,7 @@ permutation.
 """
 function is_trivial_permutation(P)
   isperm(P) || error("Input is not a permutation")
+  # TODO: use `all(n->P[n]==n,1:length(P))`?
   N = length(P)
   for n = 1:N
     P[n]!=n && return false
diff --git a/src/itensor.jl b/src/itensor.jl
index ec76c30766..59ea4a27a5 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -316,11 +316,11 @@ end
 #   f!(A::ITensor,vargs...) = ( f!(inds(A),vargs...); return A )
 # end
 
-prime(A::ITensor,vargs...)= ITensor(store(A),prime(inds(A),vargs...))
+# TODO: implement more in-place versions
 prime!(A::ITensor,vargs...)= ( prime!(inds(A),vargs...); return A )
-
-adjoint(A::ITensor) = prime(A)
+prime(A::ITensor,vargs...)= ITensor(store(A),prime(inds(A),vargs...))
+Base.adjoint(A::ITensor) = prime(A)
 
 setprime(A::ITensor,vargs...) = ITensor(store(A),setprime(inds(A),vargs...))
 
@@ -334,11 +334,11 @@ addtags(A::ITensor,vargs...) = ITensor(store(A),addtags(inds(A),vargs...))
 
 removetags(A::ITensor,vargs...) = ITensor(store(A),removetags(inds(A),vargs...))
 
-replacetags(A::ITensor,vargs...) = ITensor(store(A),replacetags(inds(A),vargs...))
-
 replacetags!(A::ITensor,vargs...) = ( replacetags!(inds(A),vargs...); return A )
+replacetags(A::ITensor,vargs...) = ITensor(store(A),replacetags(inds(A),vargs...))
 
-settags(A::ITensor,vargs...) = ITensor(store(A),settags(inds(A),vargs...))
+settags!(A::ITensor,vargs...) = ( settags!(inds(A),vargs...); return A )
+settags(A::ITensor,vargs...) = ITensor(store(A),settags(inds(A),vargs...))
 
 swaptags(A::ITensor,vargs...) = ITensor(store(A),swaptags(inds(A),vargs...))
 
diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl
index 0f7519cf7c..d79725e85e 100644
--- a/src/tensor/dense.jl
+++ b/src/tensor/dense.jl
@@ -39,6 +39,23 @@ Base.IndexStyle(::Type{TensorT}) where {TensorT<:DenseTensor} = IndexLinear()
 Base.getindex(T::DenseTensor,i::Integer) = store(T)[i]
 Base.setindex!(T::DenseTensor,v,i::Integer) = (store(T)[i] = v)
 
+function Base.convert(::Type{Tensor{ElR,NR,StoreR,IndsR}},
+                      T::Tensor{ElT,NT,StoreT,IndsT}) where {ElR,NR,StoreR<:Dense,IndsR,
+                                                             ElT,NT,StoreT<:Dense,IndsT}
+  return Tensor(convert(StoreR,store(T)),inds(T))
+end
+
+# Reshape a DenseTensor using the specified dimensions
+function Base.reshape(T::DenseTensor,dims)
+  dim(T)==dim(dims) || error("Total new dimension must be the same as the old dimension")
+  return Tensor(store(T),dims)
+end
+# This version fixes method ambiguity with AbstractArray reshape
+function Base.reshape(T::DenseTensor,dims::Dims)
+  dim(T)==dim(dims) || error("Total new dimension must be the same as the old dimension")
+  return Tensor(store(T),dims)
+end
+
 # Create an Array that is a view of the Dense Tensor
 # Useful for using Base Array functions
 Base.Array(T::DenseTensor) = reshape(data(store(T)),dims(inds(T)))
@@ -412,44 +429,79 @@ end
 @inline tuplejoin(x, y, z...) = (x..., tuplejoin(y, z...)...)
 
 # TODO: write this function
-function permutereshape(T::DenseTensor,pos::NTuple{N,NTuple{<:Any,Int}}) where {N}
+"""
+combinedims(T::Tensor,pos)
+
+Takes a permutation that is split up into tuples.
Index positions +within the tuples are combined. + +For example: + +permute_reshape(T,(3,2),1) + +First T is permuted as `permutedims(3,2,1)`, then reshaped such +that the original indices 3 and 2 are combined. +""" +function permute_reshape(T::DenseTensor{ElT,NT,IndsT},pos::Vararg{<:Any,N}) where {ElT,NT,IndsT,N} perm = tuplejoin(pos...) + dimsT = dims(T) + indsT = inds(T) if !is_trivial_permutation(perm) T = permutedims(T,perm) end - @show pos - @show perm - @show prod.(pos) - #newdims = dims(T)[perm] - Tr = reshape(T,prod.(pos)) - return Tr + N==NT && return T + newdims = MVector(ntuple(_->eltype(IndsT)(1),Val(N))) + for i ∈ 1:N + if length(pos[i])==1 + # No reshape needed, just use the + # original index + newdims[i] = indsT[pos[i][1]] + else + newdim_i = 1 + for p ∈ pos[i] + newdim_i *= dimsT[p] + end + newdims[i] = eltype(IndsT)(newdim_i) + end + end + newinds = base_type(IndsT)(Tuple(newdims)) + return reshape(T,newinds) end -function LinearAlgebra.svd(T::DenseTensor{<:Number,N}, - Lpos::NTuple{NL,<:Integer}, - Rpos::NTuple{NR,<:Integer}; - kwargs...) where {N,NL,NR} +# svd of an order-n tensor according to positions Lpos +# and Rpos +function LinearAlgebra.svd(T::DenseTensor{<:Number,N,IndsT}, + Lpos::NTuple{NL,Int}, + Rpos::NTuple{NR,Int}; + kwargs...) where {N,IndsT,NL,NR} NL+NR≠N && error("Index positions ($NL and $NR) must add up to order of Tensor ($N)") - M = permutereshape(T,(Lpos,Rpos)) + isperm(tuplejoin(Lpos,Rpos)) || error("Index positions must be a permutation") + M = permute_reshape(T,Lpos,Rpos) UM,S,VM = svd(M;kwargs...) u = ind(UM,2) - v = ind(VM,1) - Uinds = push(inds(T)[Lpos],u) - Vinds = push(inds(T)[Rpos],v) + v = ind(VM,2) + + Linds = base_type(IndsT)(ntuple(i->inds(T)[Lpos[i]],Val(NL))) + Uinds = push(Linds,u) + + # TODO: do these positions need to be reversed? + Rinds = base_type(IndsT)(ntuple(i->inds(T)[Rpos[i]],Val(NR))) + Vinds = push(Rinds,v) + U = reshape(UM,Uinds) V = reshape(VM,Vinds) + return U,S,V end -function LinearAlgebra.svd(T::DenseTensor{<:Number,2,IndsT}, - kwargs...) where {IndsT} - maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) +# svd of an order-2 tensor +function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; + kwargs...) 
where {ElT,IndsT} + maxdim::Int = get(kwargs,:maxdim,minimum(dims(T))) mindim::Int = get(kwargs,:mindim,1) cutoff::Float64 = get(kwargs,:cutoff,0.0) absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - #utags::String = get(kwargs,:utags,"Link,u") - #vtags::String = get(kwargs,:vtags,"Link,v") fastSVD::Bool = get(kwargs,:fastSVD,false) if fastSVD @@ -477,12 +529,13 @@ function LinearAlgebra.svd(T::DenseTensor{<:Number,2,IndsT}, u = eltype(IndsT)(dS) v = eltype(IndsT)(dS) - Uinds = push(ind(T,1),u) - Vinds = push(ind(T,2),v) + Uinds = IndsT((ind(T,1),u)) + Sinds = IndsT((u,v)) + Vinds = IndsT((ind(T,2),v)) - U = Tensor(Dense{T}(vec(MU)),Uinds) - S = Tensor(Diag{Vector{Float64}}(MS),IndsT(u,v)) - V = Tensor(Dense{T}(vec(MV)),Vinds) + U = Tensor(Dense{ElT}(vec(MU)),Uinds) + S = Tensor(Diag{Vector{Float64}}(MS),Sinds) + V = Tensor(Dense{ElT}(vec(MV)),Vinds) #u = Index(dS,utags) #v = settags(u,vtags) diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl index d31934a15a..7c2451df6b 100644 --- a/src/tensor/diag.jl +++ b/src/tensor/diag.jl @@ -16,7 +16,8 @@ mutable struct Diag{T} <: TensorStorage end data(D::Diag) = D.data -eltype(D::Diag) = eltype(data(D)) +Base.eltype(::Diag{T}) where {T} = eltype(T) +Base.eltype(::Type{Diag{T}}) where {T} = eltype(T) getindex(D::Diag{T},i::Int) where {T<:AbstractVector}= data(D)[i] # Version of getindex for uniform (scalar) storage getindex(D::Diag{T},i::Int) where {T<:Number} = data(D) diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index 1d303dccbd..52a81d3022 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -20,16 +20,21 @@ store(T::Tensor) = T.store inds(T::Tensor) = T.inds ind(T::Tensor,j::Integer) = inds(T)[j] -# This function is used in the AbstractArray interface -#Base.axes(T::Tensor) = Tuple(inds(T)) - +# dim and dims are used in the Tensor interface, overload +# base Dims here dims(ds::Dims) = ds dim(ds::Dims) = prod(ds) +# This is to help with some generic programming in the Tensor +# code (it helps to construct a Tuple(::NTuple{N,Int}) where the +# only known thing for dispatch is a concrete type such +# as Dims{4}) +base_type(::Type{T}) where {T<:Dims} = Dims + # The size is obtained from the indices -function Base.size(T::Tensor{StoreT,IndsT}) where {StoreT,IndsT} - return dims(inds(T)) -end +dims(T::Tensor) = dims(inds(T)) +dim(T::Tensor) = dim(inds(T)) +Base.size(T::Tensor) = dims(T) Base.copy(T::Tensor) = Tensor(copy(store(T)),copy(inds(T))) @@ -42,13 +47,17 @@ end # TODO: implement these versions Base.similar(T::Tensor) = Tensor(similar(store(T)),inds(T)) Base.similar(T::Tensor,dims) = Tensor(similar(store(T),dims),dims) +# To handle method ambiguity with AbstractArray +Base.similar(T::Tensor,dims::Dims) = Tensor(similar(store(T),dims),dims) Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) Base.similar(T::Tensor,::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) +# To handle method ambiguity with AbstractArray +Base.similar(T::Tensor,::Type{S},dims::Dims) where {S} = Tensor(similar(store(T),S),dims) -function Base.convert(::Type{Tensor{ElR,N,StoreR,Inds}}, - T::Tensor{ElT,N,StoreT,Inds}) where {ElR,ElT,N,StoreR,StoreT,Inds} - return Tensor(convert(StoreR,store(T)),inds(T)) -end +#function Base.convert(::Type{Tensor{<:Number,N,StoreR,Inds}}, +# T::Tensor{<:Number,N,<:Any,Inds}) where {N,Inds,StoreR} +# return Tensor(convert(StoreR,store(T)),inds(T)) +#end function Base.promote_rule(::Type{Tensor{ElT1,N,StoreT1,Inds}}, 
                            ::Type{Tensor{ElT2,N,StoreT2,Inds}}) where {ElT1,ElT2,N,StoreT1,StoreT2,Inds}
@@ -58,7 +67,7 @@ end
 Base.BroadcastStyle(::Type{T}) where {T<:Tensor} = Broadcast.ArrayStyle{T}()
 
 function Base.similar(bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{T}},
-                      ::Type{ElType}) where {T<:Tensor,ElType}
+                      ::Type{<:Any}) where {T<:Tensor}
   A = find_tensor(bc)
   return similar(A)
 end
diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl
index fea22ac9c3..c58f6857dd 100644
--- a/test/test_itensor_dense.jl
+++ b/test/test_itensor_dense.jl
@@ -224,20 +224,19 @@ end
   end
 
   @testset "Test isapprox for ITensors" begin
-      m,n = rand(0:20,2)
-      i = Index(m)
-      j = Index(n)
-      realData = rand(m,n)
-      complexData = realData+ zeros(m,n)*1im
-      A = ITensor(realData, i,j)
-      B = ITensor(complexData, i,j)
-      @test A≈B
-      @test B≈A
-      realDataT = Array(transpose(realData))
-      A = ITensor(realDataT, j,i)
-      @test A≈B
-      @test B≈A
-  end
+    m,n = rand(0:20,2)
+    i = Index(m)
+    j = Index(n)
+    realData = rand(m,n)
+    complexData = complex(realData)
+    A = ITensor(realData, i,j)
+    B = ITensor(complexData, i,j)
+    @test A≈B
+    @test B≈A
+    A = permute(A,j,i)
+    @test A≈B
+    @test B≈A
+end
 
 @testset "ITensor tagging and priming" begin
   s1 = Index(2,"Site,s=1")
@@ -451,10 +450,9 @@ end
 
   @testset "Test SVD truncation" begin
-    M = randn(4,4) + randn(4,4)*1.0im
     ii = Index(4)
     jj = Index(4)
-    T = ITensor(Dense{ComplexF64}(vec(M)),IndexSet(ii,jj))
+    T = randomITensor(ComplexF64,ii,jj)
+    s = svd(Array(Tensor(T))).S
     U,S,V = svd(T,ii;maxdim=2)
     @test norm(U*S*V-T)≈sqrt(s[3]^2+s[4]^2)
   end
 
@@ -477,15 +475,17 @@ end
     end
   end
 
-  # TODO: need to implement swapInds
-  #@testset "Test eigen decomposition of an ITensor" begin
-  #  A = A + swapInds(dag(A),(i,k),(j,l))
-  #  U,D,u = eigen(A,(i,k),(j,l))
-  #  @test A≈U*D*prime(dag(U))
-  #  UUᵀ = U*prime(dag(U),u)
-  #  @test UUᴴ ≈ δ(u,u') atol=1e-14
-  #end
+  @testset "Test eigen decomposition of an ITensor" begin
+    T = randomITensor(i,j,i',j')
+    T = T + swapprime(dag(T),0,1)
+    U,D,u = eigen(T)
+    @test T≈U*D*prime(dag(U))
+    UUᴴ = U*prime(dag(U),u)
+    @test UUᴴ ≈ δ(u,u') atol=1e-14
+  end
+
 end # End ITensor factorization testset
+
 end # End Dense storage test
 
 end # End Dense ITensor basic functionality

From 0940c48a99f5fcd76625ce8f73f9e4fa677c6e70 Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Thu, 5 Sep 2019 17:31:27 -0400
Subject: [PATCH 06/27] Get dense decompositions working. Need to make
 DiagTensor

---
 src/decomp.jl              | 200 ++++++-----------
 src/indexset.jl            |  52 +++--
 src/itensor.jl             |  22 +-
 src/tensor/dense.jl        | 427 ++++++++++++-------------------
 src/tensor/tensor.jl       |  18 +-
 test/test_itensor_dense.jl |  19 +-
 6 files changed, 279 insertions(+), 459 deletions(-)

diff --git a/src/decomp.jl b/src/decomp.jl
index 04fccfbfec..7add719561 100644
--- a/src/decomp.jl
+++ b/src/decomp.jl
@@ -1,119 +1,38 @@
-export svd,
-       qr,
-       polar,
-       eigen,
+export polar,
+       eigenHermitian,
        factorize
 
-#function truncate!(P::Vector{Float64};
-#                   kwargs...)::Tuple{Float64,Float64}
-#  maxdim::Int = min(get(kwargs,:maxdim,length(P)), length(P))
-#  mindim::Int = max(get(kwargs,:mindim,1), 1)
-#  cutoff::Float64 = max(get(kwargs,:cutoff,0.0), 0.0)
-#  absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false)
-#  doRelCutoff::Bool = get(kwargs,:doRelCutoff,true)
-#
-#  origm = length(P)
-#  docut = 0.0
-#
-#  if P[1]<=0.0
-#    P[1] = 0.0
-#    resize!(P,1)
-#    return 0.,0.
-# end -# -# if origm==1 -# docut = P[1]/2 -# return 0.,docut -# end -# -# #Zero out any negative weight -# for n=origm:-1:1 -# (P[n] >= 0.0) && break -# P[n] = 0.0 -# end -# -# n = origm -# truncerr = 0.0 -# while n > maxdim -# truncerr += P[n] -# n -= 1 -# end -# -# if absoluteCutoff -# #Test if individual prob. weights fall below cutoff -# #rather than using *sum* of discarded weights -# while P[n] <= cutoff && n > mindim -# truncerr += P[n] -# n -= 1 -# end -# else -# scale = 1.0 -# if doRelCutoff -# scale = sum(P) -# (scale==0.0) && (scale = 1.0) -# end -# -# #Continue truncating until *sum* of discarded probability -# #weight reaches cutoff reached (or m==mindim) -# while (truncerr+P[n] <= cutoff*scale) && (n > mindim) -# truncerr += P[n] -# n -= 1 -# end -# -# truncerr /= scale -# end -# -# if n < 1 -# n = 1 -# end -# -# if n < origm -# docut = (P[n]+P[n+1])/2 -# if abs(P[n]-P[n+1]) < 1E-3*P[n] -# docut += 1E-3*P[n] -# end -# end -# -# resize!(P,n) -# -# return truncerr,docut -#end - -# Take ITensor A, permute the storage so that -# Linds are in front, return the permuted A -# and the new left and right indices -function _permute_for_factorize(A::ITensor, - Linds...) - Ais = inds(A) - Lis_orig = IndexSet(Linds...) - Lis = commoninds(Ais,Lis_orig) - Ris = uniqueinds(Ais,Lis) - Ais_perm = IndexSet(Lis...,Ris...) - # TODO: check if hassameinds(Lis,Ais[1:length(Lis)]) - # so that a permute can be avoided - if inds(A) ≠ Ais_perm - A = permute(A,Ais_perm) - end - return A,Lis,Ris -end - import LinearAlgebra.qr function qr(A::ITensor, - Linds...) - A,Lis,Ris = _permute_for_factorize(A,Linds...) - Qis,Qstore,Pis,Pstore = storage_qr(store(A),Lis,Ris) - Q = ITensor(Qstore,Qid) - R = ITensor(Pstore,Pis) - return Q,R,commonindex(Q,R) + Linds...; + kwargs...) + tags::TagSet = get(kwargs,:tags,"Link,qr") + Lis = commoninds(inds(A),IndexSet(Linds...)) + Ris = uniqueinds(inds(A),Lis) + Lpos,Rpos = getperms(inds(A),Lis,Ris) + QT,RT = qr(Tensor(A),Lpos,Rpos) + Q,R = ITensor(QT),ITensor(RT) + q = commonindex(Q,R) + settags!(Q,tags,q) + settags!(R,tags,q) + q = settags(q,tags) + return Q,R,q end +# TODO: allow custom tags in internal indices? function polar(A::ITensor, - Linds...) - A,Lis,Ris = _permute_for_factorize(A,Linds...) - Qis,Qstore,Pis,Pstore = storage_polar(store(A),Lis,Ris) - Q = ITensor(Qstore,Qis) - P = ITensor(Pstore,Pis) - return Q,P,commoninds(Q,P) + Linds...; + kwargs...) + Lis = commoninds(inds(A),IndexSet(Linds...)) + Ris = uniqueinds(inds(A),Lis) + Lpos,Rpos = getperms(inds(A),Lis,Ris) + UT,PT = polar(Tensor(A),Lpos,Rpos) + U,P = ITensor(UT),ITensor(PT) + u = commoninds(U,P) + p = uniqueinds(P,U) + replaceinds!(U,u,p') + replaceinds!(P,u,p') + return U,P,commoninds(U,P) end import LinearAlgebra.svd @@ -145,10 +64,11 @@ function svd(A::ITensor, kwargs...) utags::TagSet = get(kwargs,:utags,"Link,u") vtags::TagSet = get(kwargs,:vtags,"Link,v") - A,Lis,Ris = _permute_for_factorize(A,Linds...) - Lpos,Rpos = decomp_permutation(inds(A),Lis,Ris) - TU,TS,TV = svd(Tensor(A),Lpos,Rpos;kwargs...) - U,S,V = ITensor(TU),ITensor(TS),ITensor(TV) + Lis = commoninds(inds(A),IndexSet(Linds...)) + Ris = uniqueinds(inds(A),Lis) + Lpos,Rpos = getperms(inds(A),Lis,Ris) + UT,ST,VT = svd(Tensor(A),Lpos,Rpos;kwargs...) + U,S,V = ITensor(UT),ITensor(ST),ITensor(VT) u = commonindex(U,S) v = commonindex(S,V) settags!(U,utags,u) @@ -200,7 +120,7 @@ end function _factorize_from_left_eigen(A::ITensor, Linds...; kwargs...) - A,Lis,Ris = _permute_for_factorize(A,Linds...) 
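+  # Editor's note (hedged): with A ≈ FU*FV where FU is an isometry on Lis,
+  # FU can be read off from the Hermitian eigendecomposition of
+  # A² = A*prime(dag(A),Lis), since A² = FU*D*prime(dag(FU)); then
+  # FV = dag(FU)*A carries the remaining indices.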
+ Lis = commoninds(inds(A),IndexSet(Linds...)) A² = A*prime(dag(A),Lis) FU,D = eigen(A²,Lis,prime(Lis);kwargs...) FV = dag(FU)*A @@ -210,7 +130,7 @@ end function _factorize_from_right_eigen(A::ITensor, Linds...; kwargs...) - A,Lis,Ris = _permute_for_factorize(A,Linds...) + Ris = uniqueinds(inds(A),IndexSet(Linds...)) A² = A*prime(dag(A),Ris) FV,D = eigen(A²,Ris,prime(Ris); kwargs...) FU = A*dag(FV) @@ -252,24 +172,42 @@ function factorize(A::ITensor, throw(ArgumentError("In factorize, no dir = $dir supported. Use center, fromleft or fromright.")) end -# TODO: add a version that automatically detects the IndexSets -# by matching based on tags (default to matching 0 and 1 primed indices) +function eigenHermitian(A::ITensor, + Linds=findinds(A,"0"), + Rinds=prime(IndexSet(Linds)); + kwargs...) + tags::TagSet = get(kwargs,:tags,"Link,eigen") + Lis = commoninds(inds(A),IndexSet(Linds...)) + Ris = uniqueinds(inds(A),Lis) + Lpos,Rpos = getperms(inds(A),Lis,Ris) + UT,DT = eigenHermitian(Tensor(A),Lpos,Rpos) + U,D = ITensor(UT),ITensor(DT) + u = commonindex(U,D) + settags!(U,tags,u) + settags!(D,tags,u) + u = settags(u,tags) + v = uniqueindex(D,U) + D *= δ(v,u') + return U,D,u +end + import LinearAlgebra.eigen function eigen(A::ITensor, - Linds, - Rinds; + Linds=findinds(A,"0"), + Rinds=prime(IndexSet(Linds)); kwargs...) - Lis = IndexSet(Linds) - Ris = IndexSet(Rinds) - Ais_perm = IndexSet(Lis...,Ris...) - !hassameinds(Ais_perm,A) && throw(ErrorException("Input indices must be contained in the ITensor")) - if inds(A) ≠ Ais_perm - A = permute(A,Ais_perm) - end - #TODO: More of the index analysis should be moved out of storage_eigen - Uis,Ustore,Dis,Dstore = storage_eigen(store(A),Lis,Ris; kwargs...) - U = ITensor(Uis,Ustore) - D = ITensor(Dis,Dstore) - return U,D,commonindex(U,D) + tags::TagSet = get(kwargs,:tags,"Link,eigen") + Lis = commoninds(inds(A),IndexSet(Linds...)) + Ris = uniqueinds(inds(A),Lis) + Lpos,Rpos = getperms(inds(A),Lis,Ris) + UT,DT = eigen(Tensor(A),Lpos,Rpos) + U,D = ITensor(UT),ITensor(DT) + u = commonindex(U,D) + settags!(U,tags,u) + settags!(D,tags,u) + u = settags(u,tags) + v = uniqueindex(D,U) + D *= δ(v,u') + return U,D,u end diff --git a/src/indexset.jl b/src/indexset.jl index 2dd29f068d..05169d91d3 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -30,12 +30,6 @@ IndexSet(inds::NTuple{N,Index}) where {N} = IndexSet{N}(inds) inds(is::IndexSet) = is.inds -# This is to help with some generic programming in the Tensor -# code (it helps to construct an IndexSet(::NTuple{N,Index}) where the -# only known thing for dispatch is a concrete type such -# as IndexSet{4}) -base_type(::Type{T}) where {T<:IndexSet} = IndexSet - # Empty constructor IndexSet() = IndexSet(()) @@ -43,13 +37,18 @@ IndexSet() = IndexSet(()) IndexSet(::Val{N}) where {N} = IndexSet(ntuple(_->Index(),Val(N))) # Construct from various sets of indices -IndexSet(inds::Vararg{Index,N}) where {N} = IndexSet(inds) +IndexSet(inds::Vararg{Index,N}) where {N} = IndexSet{N}(NTuple{N,Index}(inds)) +IndexSet{N}(inds::Vararg{Index,N}) where {N} = IndexSet{N}(NTuple{N,Index}(inds)) # Construct from various sets of IndexSets IndexSet(inds::IndexSet) = inds IndexSet(inds::IndexSet,i::Index) = IndexSet(inds...,i) IndexSet(i::Index,inds::IndexSet) = IndexSet(i,inds...) IndexSet(is1::IndexSet,is2::IndexSet) = IndexSet(is1...,is2...) + +# TODO: make a version that accepts an arbitrary set of IndexSets +# as well as mixtures of seperate Indices and Tuples of Indices. +# Look at jointuples in the DenseTensor decomposition logic. 
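+# A possible shape for that generalization (editor's sketch, untested;
+# `tuplejoin` is the helper currently living in tensor/dense.jl):
+#
+#   IndexSet(inds...) = IndexSet(tuplejoin(map(i->Tuple(IndexSet(i)),inds)...))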
IndexSet(inds::NTuple{2,IndexSet}) = IndexSet(inds...)
 
 # Convert to an Index if there is only one
@@ -71,11 +70,6 @@ dims(is::IndexSet{N}) where {N} = ntuple(i->dim(is[i]),Val(N))
 dim(is::IndexSet) = prod(dim.(is))
 dim(is::IndexSet,pos::Integer) = dim(is[pos])
 
-# TODO: what should size(::IndexSet) do?
-#size(is::IndexSet) = size(is.inds)
-#Base.size(is::IndexSet) = dims(is)
-#Base.size(is::IndexSet,pos::Integer) = dim(is,pos)
-
 # Optimize this (write our own function that extracts dimensions
 # with a function)
 Base.strides(is::IndexSet) = Base.size_to_strides(1, dims(is)...)
@@ -92,8 +86,18 @@ Base.eltype(is::IndexSet) = eltype(typeof(is))
 # Needed for findfirst (I think)
 Base.keys(is::IndexSet{N}) where {N} = 1:N
 
 StaticArrays.push(is::IndexSet{N},i::Index) where {N} = IndexSet{N+1}(push(is.inds,i))
+StaticArrays.pushfirst(is::IndexSet{N},i::Index) where {N} = IndexSet{N+1}(pushfirst(is.inds,i))
+
+unioninds(is1::IndexSet{N1},is2::IndexSet{N2}) where {N1,N2} = IndexSet{N1+N2}(is1...,is2...)
+
+# This is to help with some generic programming in the Tensor
+# code (it helps to construct an IndexSet(::NTuple{N,Index}) where the
+# only known thing for dispatch is a concrete type such
+# as IndexSet{4})
+StaticArrays.similar_type(::Type{IndsT},::Val{N}) where {IndsT<:IndexSet,N} = IndexSet{N}
+
+sim(is::IndexSet{N}) where {N} = IndexSet{N}(ntuple(i->sim(is[i]),Val(N)))
 
 """
 minDim(is::IndexSet)
@@ -557,6 +561,7 @@ Get the permutation that takes collection 2 to collection 1,
 such that col2[p].==col1
 """
 function getperm(set1, set2)
+  # TODO: use perm = ntuple(i->findfirst(==(set2[i]),set1),Val(N))
   l1 = length(set1)
   l2 = length(set2)
   l1==l2 || throw(DimensionMismatch("Mismatched input sizes in calcPerm: l1=$l1, l2=$l2"))
@@ -573,6 +578,19 @@ function getperm(set1, set2)
   return p
 end
 
+"""
+getperms(col1,col2,col3)
+
+Get the permutations that take collections 2 and 3 to collection 1.
+"""
+function getperms(s::IndexSet{N},s1::IndexSet{N1},s2::IndexSet{N2}) where {N1,N2,N}
+  N1+N2≠N && error("Size of partial sets don't match with total set")
+  perm1 = ntuple(i->findfirst(==(s1[i]),s),Val(N1))
+  perm2 = ntuple(i->findfirst(==(s2[i]),s),Val(N2))
+  isperm((perm1...,perm2...)) || error("Combined permutations are $((perm1...,perm2...)), not a valid permutation")
+  return perm1,perm2
+end
+
 """
 Determine if s1 and s2 have no overlapping elements.
 """
@@ -599,6 +617,3 @@ function is_trivial_permutation(P)
   return true
 end
 
-function decomp_permutation(s::IndexSet{N},s1::IndexSet{N1},s2::IndexSet{N2}) where {N1,N2,N}
-  N1+N2≠N && error("Size of partial sets don't match with total set")
-  perm1 = ntuple(i->findfirst(==(s1[i]),s),Val(N1))
-  perm2 = ntuple(i->findfirst(==(s2[i]),s),Val(N2))
-  isperm((perm1...,perm2...)) || error("Combined permutations are $((perm1...,perm2...)), not a valid permutation")
-  return perm1,perm2
-end
-
diff --git a/src/itensor.jl b/src/itensor.jl
index 59ea4a27a5..07324a4df6 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -269,7 +269,7 @@ function Base.getindex(T::ITensor{N},ivs::Vararg{IndexVal,N}) where {N}
   return T[vals...]
 end
 
-# TODO: what is this?
+# TODO: what is this doing?
 #function getindex(T::ITensor,ivs::Union{IndexVal, AbstractVector{IndexVal}}...)
 #  p = getperm(inds(T),map(x->x isa IndexVal ? x : x[1], ivs))
 #  vals = map(x->x isa IndexVal ? val(x) : val.(x), ivs[p])
@@ -286,6 +286,7 @@ function Base.setindex!(T::ITensor,x::Number,ivs::IndexVal...)
   return T[vals...] = x
 end
 
+# TODO: what was this doing?
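+# (Editor's guess: it assigned to a slice selected by vectors of IndexVals;
+# it is left commented out until ITensor slicing semantics are decided.)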
#function setindex!(T::ITensor, # x::Union{<:Number, AbstractArray{<:Number}}, # ivs::Union{IndexVal, AbstractVector{IndexVal}}...) @@ -310,6 +311,16 @@ function replaceindex!(A::ITensor,i::Index,j::Index) return A end +function replaceinds!(A::ITensor,inds1,inds2) + is1 = IndexSet(inds1) + is2 = IndexSet(inds2) + pos = indexpositions(A,is1) + for (j,p) ∈ enumerate(pos) + inds(A)[p] = is2[j] + end + return A +end + # TODO: can we turn these into a macro? # for f ∈ (prime,setprime,noprime,...) # f(A::ITensor,vargs...) = ITensor(store(A),f(inds(A),vargs...)) @@ -360,6 +371,9 @@ function Base.isapprox(A::ITensor, return norm(A-B) <= atol + rtol*max(norm(A),norm(B)) end +# TODO: bring this back or just use T[]? +# I think T[] may not be generic, since it may only work if order(T)==0 +# so it may be nice to have a seperate scalar(T) for when dim(T)==1 #function scalar(T::ITensor) # !(order(T)==0 || dim(T)==1) && throw(ArgumentError("ITensor with inds $(inds(T)) is not a scalar")) # return scalar(Tensor(store(T),inds(T))) @@ -425,14 +439,8 @@ end function *(A::ITensor,B::ITensor) (Alabels,Blabels) = compute_contraction_labels(inds(A),inds(B)) - # TODO: implement contract CT = contract(Tensor(A),Alabels,Tensor(B),Blabels) C = ITensor(CT) - - # OLD INTERFACE - ###(Cis,Cstore) = storage_contract(store(A),inds(A),store(B),inds(B)) - ###C = ITensor(Cis,Cstore) - if warnTensorOrder > 0 && order(C) >= warnTensorOrder println("Warning: contraction resulted in ITensor with $(order(C)) indices") end diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index d79725e85e..79cf953e43 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -1,12 +1,12 @@ export Dense +# +# Dense storage +# + struct Dense{T} <: TensorStorage data::Vector{T} Dense{T}(data::Vector) where {T} = new{T}(convert(Vector{T},data)) - #Dense(data::Vector{T}) where {T} = new{T}(data) - #Dense{T}(size::Integer) where {T} = new{T}(Vector{T}(undef,size)) - #Dense{T}(x::Number,size::Integer) where {T} = new{T}(fill(T(x),size)) - #Dense(x::T,size::Integer) where {T<:Number} = new{T}(fill(T,size)) Dense{T}() where {T} = new{T}(Vector{T}()) end @@ -32,6 +32,10 @@ Base.eltype(::Type{Dense{T}}) where {T} = eltype(T) Base.promote_rule(::Type{Dense{T1}},::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} Base.convert(::Type{Dense{R}},D::Dense) where {R} = Dense{R}(convert(Vector{R},data(D))) +# +# DenseTensor (Tensor using Dense storage) +# + const DenseTensor{El,N,Inds} = Tensor{El,N,<:Dense,Inds} # Basic functionality for AbstractArray interface @@ -83,6 +87,7 @@ using Base.Cartesian: @nexprs, # # A generalized permutedims!(P,B,perm) that also allows # a function to be applied elementwise +# TODO: benchmark to make sure it is similar to Base.permutedims! # # Based off of the permutedims! implementation in Julia's base: # https://github.com/JuliaLang/julia/blob/91151ab871c7e7d6689d1cfa793c12062d37d6b6/base/multidimensional.jl#L1355 @@ -122,147 +127,6 @@ function outer(T1::DenseTensor,T2::DenseTensor) end const ⊗ = outer -# Do we actually need all of these? -#TODO: this should do proper promotions of the storage data -#e.g. 
ComplexF64*Dense{Float64} -> Dense{ComplexF64} -#*(D::Dense{T},x::S) where {T,S<:Number} = Dense{promote_type(T,S)}(x*data(D)) -#*(x::Number,D::Dense) = D*x - -#Base.promote_type(::Type{Dense{T1}},::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} - -#convert(::Type{Dense{T}},D::Dense) where {T} = Dense{T}(data(D)) - -#storage_convert(::Type{Dense{T}},D::Dense,is::IndexSet) where {T} = convert(Dense{T},D) - -# convert to complex -#storage_complex(D::Dense{T}) where {T} = Dense{complex(T)}(complex(data(D))) - -#function storage_outer(D1::Dense{T},is1::IndexSet,D2::Dense{S},is2::IndexSet) where {T, S <:Number} -# return Dense{promote_type(T,S)}(vec(data(D1)*transpose(data(D2)))),IndexSet(is1,is2) -#end - -#storage_convert(::Type{Array},D::Dense,is::IndexSet) = reshape(data(D),dims(is)) - -#function storage_convert(::Type{Array}, -# D::Dense, -# ois::IndexSet, -# nis::IndexSet) -# P = calculate_permutation(nis,ois) -# A = reshape(data(D),dims(ois)) -# return permutedims(A,P) -#end - -#storage_fill!(D::Dense,x::Number) = fill!(data(D),x) -# -#function storage_getindex(Tstore::Dense{T}, -# Tis::IndexSet, -# vals::Union{Int, AbstractVector{Int}}...) where {T} -# return getindex(reshape(data(Tstore),dims(Tis)),vals...) -#end -# -#function storage_setindex!(Tstore::Dense, -# Tis::IndexSet, -# x::Union{<:Number, AbstractArray{<:Number}}, -# vals::Union{Int, AbstractVector{Int}}...) -# return setindex!(reshape(data(Tstore),dims(Tis)),x,vals...) -#end - -#function _add!(Bstore::Dense, -# Bis::IndexSet, -# Astore::Dense, -# Ais::IndexSet, -# x::Number = 1) -# p = getperm(Bis,Ais) -# Adata = data(Astore) -# Bdata = data(Bstore) -# if is_trivial_permutation(p) -# if x == 1 -# Bdata .+= Adata -# else -# Bdata .= Bdata .+ x .* Adata -# end -# else -# reshapeBdata = reshape(Bdata,dims(Bis)) -# reshapeAdata = reshape(Adata,dims(Ais)) -# if x == 1 -# _add!(reshapeBdata,reshapeAdata,p) -# else -# _add!(reshapeBdata,reshapeAdata,p,(a,b)->a+x*b) -# end -# end -#end - -#function storage_copyto!(Bstore::Dense, -# Bis::IndexSet, -# Astore::Dense, -# Ais::IndexSet, -# x::Number = 1) -# p = getperm(Bis,Ais) -# Adata = data(Astore) -# Bdata = data(Bstore) -# if is_trivial_permutation(p) -# if x == 1 -# Bdata .= Adata -# else -# Bdata .= x .* Adata -# end -# else -# reshapeBdata = reshape(Bdata,dims(Bis)) -# reshapeAdata = reshape(Adata,dims(Ais)) -# if x == 1 -# _add!(reshapeBdata,reshapeAdata,p,(a,b)->b) -# else -# _add!(reshapeBdata,reshapeAdata,p,(a,b)->x*b) -# end -# end -#end - -#function storage_mult!(Astore::Dense, -# x::Number) -# Adata = data(Astore) -# rmul!(Adata, x) -#end - -#function storage_mult(Astore::Dense, -# x::Number) -# Bstore = copy(Astore) -# storage_mult!(Bstore, x) -# return Bstore -#end - -# For Real storage and complex scalar, promotion -# of the storage is needed -#function storage_mult(Astore::Dense{T}, -# x::S) where {T<:Real,S<:Complex} -# Bstore = convert(Dense{promote_type(S,T)},Astore) -# storage_mult!(Bstore, x) -# return Bstore -#end - -## TODO: make this a special version of storage_add!() -## Make sure the permutation is optimized -#function storage_permute!(Bstore::Dense, Bis::IndexSet, -# Astore::Dense, Ais::IndexSet) -# p = calculate_permutation(Bis,Ais) -# Adata = data(Astore) -# Bdata = data(Bstore) -# if is_trivial_permutation(p) -# Bdata .= Adata -# else -# reshapeBdata = reshape(Bdata,dims(Bis)) -# permutedims!(reshapeBdata,reshape(Adata,dims(Ais)),p) -# end -#end - -#function storage_dag(Astore::Dense,Ais::IndexSet) -# return dag(Ais),storage_conj(Astore) 
-#end - -#function storage_scalar(D::Dense) -# length(D)==1 && return D[1] -# throw(ErrorException("Cannot convert Dense -> Number for length of data greater than 1")) -#end - function contract(T1::DenseTensor,labelsT1, T2::DenseTensor,labelsT2) indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) @@ -299,6 +163,9 @@ function contract!(R::DenseTensor,labelsR, return R end +# TODO: make sure this is doing type promotion correctly +# since we are calling BLAS (need to promote T1 and T2 to +# the same types) function _contract!(R::DenseTensor,labelsR, T1::DenseTensor,labelsT1, T2::DenseTensor,labelsT2) @@ -308,6 +175,8 @@ function _contract!(R::DenseTensor,labelsR, return R end +# TODO: move this directly into contract? Or write the signature +# in terms of Tensor (so that it can be used in block sparse contractions) function _contract_dense_dense!(C::Array{T}, p::ContractionProperties, A::Array{T}, @@ -367,63 +236,8 @@ function _contract_dense_dense!(C::Array{T}, return end -function _contract_scalar!(Cdata::Array,Clabels::Vector{Int}, - Bdata::Array,Blabels::Vector{Int},α,β) - p = calculate_permutation(Blabels,Clabels) - if β==0 - if is_trivial_permutation(p) - Cdata .= α.*Bdata - else - #TODO: make an optimized permutedims!() that also scales the data - permutedims!(Cdata,α*Bdata) - end - else - if is_trivial_permutation(p) - Cdata .= α.*Bdata .+ β.*Cdata - else - #TODO: make an optimized permutedims!() that also adds and scales the data - permBdata = permutedims(Bdata,p) - Cdata .= α.*permBdata .+ β.*Cdata - end - end - return -end - -#function _contract(Cinds::IndexSet, Clabels::Vector{Int}, -# Astore::Dense{SA}, Ainds::IndexSet, Alabels::Vector{Int}, -# Bstore::Dense{SB}, Binds::IndexSet, Blabels::Vector{Int}) where {SA<:Number,SB<:Number} -# SC = promote_type(SA,SB) -# -# # Convert the arrays to a common type -# # since we will call BLAS -# Astore = convert(Dense{SC},Astore) -# Bstore = convert(Dense{SC},Bstore) -# -# Adims = dims(Ainds) -# Bdims = dims(Binds) -# Cdims = dims(Cinds) -# -# # Create storage for output tensor -# Cstore = Dense{SC}(prod(Cdims)) -# -# Adata = reshape(data(Astore),Adims) -# Bdata = reshape(data(Bstore),Bdims) -# Cdata = reshape(data(Cstore),Cdims) -# -# if(length(Alabels)==0) -# contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) -# elseif(length(Blabels)==0) -# contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) -# else -# props = ContractionProperties(Alabels,Blabels,Clabels) -# compute_contraction_properties!(props,Adata,Bdata,Cdata) -# _contract_dense_dense!(Cdata,props,Adata,Bdata) -# end -# -# return Cstore -#end - # Combine a bunch of tuples +# TODO: move this functionality to IndexSet, combine with unioninds? @inline tuplejoin(x) = x @inline tuplejoin(x, y) = (x..., y...) @inline tuplejoin(x, y, z...) = (x..., tuplejoin(y, z...)...) @@ -444,6 +258,10 @@ that the original indices 3 and 2 are combined. """ function permute_reshape(T::DenseTensor{ElT,NT,IndsT},pos::Vararg{<:Any,N}) where {ElT,NT,IndsT,N} perm = tuplejoin(pos...) 
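+  # e.g. pos = ((3,2),(1,)) flattens to perm = (3,2,1), and each inner
+  # tuple marks a group of permuted dimensions to be combined
+  # (editor's example of the tuplejoin step above)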
+ + length(perm)≠NT && error("Index positions must add up to order of Tensor ($N)") + isperm(perm) || error("Index positions must be a permutation") + dimsT = dims(T) indsT = inds(T) if !is_trivial_permutation(perm) @@ -464,7 +282,7 @@ function permute_reshape(T::DenseTensor{ElT,NT,IndsT},pos::Vararg{<:Any,N}) wher newdims[i] = eltype(IndsT)(newdim_i) end end - newinds = base_type(IndsT)(Tuple(newdims)) + newinds = similar_type(IndsT,Val(N))(Tuple(newdims)) return reshape(T,newinds) end @@ -474,18 +292,16 @@ function LinearAlgebra.svd(T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs...) where {N,IndsT,NL,NR} - NL+NR≠N && error("Index positions ($NL and $NR) must add up to order of Tensor ($N)") - isperm(tuplejoin(Lpos,Rpos)) || error("Index positions must be a permutation") M = permute_reshape(T,Lpos,Rpos) UM,S,VM = svd(M;kwargs...) u = ind(UM,2) v = ind(VM,2) - Linds = base_type(IndsT)(ntuple(i->inds(T)[Lpos[i]],Val(NL))) + Linds = similar_type(IndsT,Val(NL))(ntuple(i->inds(T)[Lpos[i]],Val(NL))) Uinds = push(Linds,u) # TODO: do these positions need to be reversed? - Rinds = base_type(IndsT)(ntuple(i->inds(T)[Rpos[i]],Val(NR))) + Rinds = similar_type(IndsT,Val(NR))(ntuple(i->inds(T)[Rpos[i]],Val(NR))) Vinds = push(Rinds,v) U = reshape(UM,Uinds) @@ -512,7 +328,6 @@ function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; conj!(MV) P = MS.^2 - #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff truncate!(P;mindim=mindim, maxdim=maxdim, cutoff=cutoff, @@ -528,92 +343,132 @@ function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; # Make the new indices to go onto U and V u = eltype(IndsT)(dS) v = eltype(IndsT)(dS) - Uinds = IndsT((ind(T,1),u)) Sinds = IndsT((u,v)) Vinds = IndsT((ind(T,2),v)) - U = Tensor(Dense{ElT}(vec(MU)),Uinds) - S = Tensor(Diag{Vector{Float64}}(MS),Sinds) + S = Tensor(Diag{Vector{real(ElT)}}(MS),Sinds) V = Tensor(Dense{ElT}(vec(MV)),Vinds) + return U,S,V +end - #u = Index(dS,utags) - #v = settags(u,vtags) - #Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) - #TODO: make a diag storage - #Sis,Sstore = IndexSet(u,v),Diag{Vector{Float64}}(MS) - #Vis,Vstore = IndexSet(Ris...,v),Dense{T}(Vector{T}(vec(MV))) +# eigendecomposition of an order-n tensor according to positions Lpos +# and Rpos +function eigenHermitian(T::DenseTensor{<:Number,N,IndsT}, + Lpos::NTuple{NL,Int}, + Rpos::NTuple{NR,Int}; + kwargs...) where {N,IndsT,NL,NR} + M = permute_reshape(T,Lpos,Rpos) + UM,D = eigenHermitian(M;kwargs...) + u = ind(UM,2) + Linds = similar_type(IndsT,Val(NL))(ntuple(i->inds(T)[Lpos[i]],Val(NL))) + Uinds = push(Linds,u) + U = reshape(UM,Uinds) + return U,D +end - return U,S,V +function eigenHermitian(T::DenseTensor{ElT,2,IndsT}; + kwargs...) 
where {ElT,IndsT} + maxdim::Int = get(kwargs,:maxdim,minimum(dims(T))) + mindim::Int = get(kwargs,:mindim,1) + cutoff::Float64 = get(kwargs,:cutoff,0.0) + absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) + doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) + #tags::TagSet = get(kwargs,:lefttags,"Link,u") + #lefttags::TagSet = get(kwargs,:lefttags,tags) + #righttags::TagSet = get(kwargs,:righttags,prime(lefttags)) + + DM,UM = eigen(Hermitian(Matrix(T))) + + # Sort by largest to smallest eigenvalues + p = sortperm(DM; rev = true) + DM = DM[p] + UM = UM[:,p] + + truncate!(DM;maxdim=maxdim, + cutoff=cutoff, + absoluteCutoff=absoluteCutoff, + doRelCutoff=doRelCutoff) + dD = length(DM) + if dD < size(UM,2) + UM = UM[:,1:dD] + end + + # Make the new indices to go onto U and V + u = eltype(IndsT)(dD) + v = eltype(IndsT)(dD) + Uinds = IndsT((ind(T,1),u)) + Dinds = IndsT((u,v)) + U = Tensor(Dense{ElT}(vec(UM)),Uinds) + D = Tensor(Diag{Vector{real(ElT)}}(DM),Dinds) + return U,D end -#function storage_eigen(Astore::Dense{T}, -# Lis::IndexSet, -# Ris::IndexSet; -# kwargs...) where {T} -# maxdim::Int = get(kwargs,:maxdim,min(dim(Lis),dim(Ris))) -# mindim::Int = get(kwargs,:mindim,1) -# cutoff::Float64 = get(kwargs,:cutoff,0.0) -# absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) -# doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) -# tags::TagSet = get(kwargs,:lefttags,"Link,u") -# lefttags::TagSet = get(kwargs,:lefttags,tags) -# righttags::TagSet = get(kwargs,:righttags,prime(lefttags)) -# -# dim_left = dim(Lis) -# dim_right = dim(Ris) -# MD,MU = eigen(Hermitian(reshape(data(Astore),dim_left,dim_right))) -# -# # Sort by largest to smallest eigenvalues -# p = sortperm(MD; rev = true) -# MD = MD[p] -# MU = MU[:,p] -# -# #@printf " Truncating with maxdim=%d cutoff=%.3E\n" maxdim cutoff -# truncate!(MD;maxdim=maxdim, -# cutoff=cutoff, -# absoluteCutoff=absoluteCutoff, -# doRelCutoff=doRelCutoff) -# dD = length(MD) -# if dD < size(MU,2) -# MU = MU[:,1:dD] -# end -# -# #TODO: include truncation parameters as keyword arguments -# u = Index(dD,lefttags) -# v = settags(u,righttags) -# Uis,Ustore = IndexSet(Lis...,u),Dense{T}(vec(MU)) -# Dis,Dstore = IndexSet(u,v),Diag{Vector{Float64}}(MD) -# return (Uis,Ustore,Dis,Dstore) -#end - -#function storage_qr(Astore::Dense{T}, -# Lis::IndexSet, -# Ris::IndexSet; -# kwargs...) 
where {T} -# tags::TagSet = get(kwargs,:tags,"Link,u") -# dim_left = dim(Lis) -# dim_right = dim(Ris) -# MQ,MP = qr(reshape(data(Astore),dim_left,dim_right)) -# dim_middle = min(dim_left,dim_right) -# u = Index(dim_middle,tags) -# #Must call Matrix() on MQ since the QR decomposition outputs a sparse -# #form of the decomposition -# Qis,Qstore = IndexSet(Lis...,u),Dense{T}(vec(Matrix(MQ))) -# Pis,Pstore = IndexSet(u,Ris...),Dense{T}(vec(Matrix(MP))) -# return (Qis,Qstore,Pis,Pstore) -#end -# -#function storage_polar(Astore::Dense{T}, -# Lis::IndexSet, -# Ris::IndexSet) where {T} -# dim_left = dim(Lis) -# dim_right = dim(Ris) -# MQ,MP = polar(reshape(data(Astore),dim_left,dim_right)) -# dim_middle = min(dim_left,dim_right) -# Uis = prime(Ris) -# Qis,Qstore = IndexSet(Lis...,Uis...),Dense{T}(vec(MQ)) -# Pis,Pstore = IndexSet(Uis...,Ris...),Dense{T}(vec(MP)) -# return (Qis,Qstore,Pis,Pstore) -#end +# qr decomposition of an order-n tensor according to positions Lpos +# and Rpos +function LinearAlgebra.qr(T::DenseTensor{<:Number,N,IndsT}, + Lpos::NTuple{NL,Int}, + Rpos::NTuple{NR,Int}) where {N,IndsT,NL,NR} + M = permute_reshape(T,Lpos,Rpos) + QM,RM = qr(M) + q = ind(QM,2) + r = ind(RM,1) + Linds = similar_type(IndsT,Val(NL))(ntuple(i->inds(T)[Lpos[i]],Val(NL))) + Qinds = push(Linds,q) + Q = reshape(QM,Qinds) + Rinds = similar_type(IndsT,Val(NR))(ntuple(i->inds(T)[Rpos[i]],Val(NR))) + Rinds = pushfirst(Rinds,r) + R = reshape(RM,Rinds) + return Q,R +end + +function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; + kwargs...) where {ElT,IndsT} + QM,RM = qr(Matrix(T)) + dim = size(QM,2) + # Make the new indices to go onto Q and R + q = eltype(IndsT)(dim) + Qinds = IndsT((ind(T,1),q)) + Rinds = IndsT((q,ind(T,2))) + Q = Tensor(Dense{ElT}(vec(Matrix(QM))),Qinds) + R = Tensor(Dense{ElT}(vec(Matrix(RM))),Rinds) + return Q,R +end + +# polar decomposition of an order-n tensor according to positions Lpos +# and Rpos +function polar(T::DenseTensor{<:Number,N,IndsT}, + Lpos::NTuple{NL,Int}, + Rpos::NTuple{NR,Int}) where {N,IndsT,NL,NR} + M = permute_reshape(T,Lpos,Rpos) + UM,PM = polar(M) + + Linds = similar_type(IndsT,Val(NL))(ntuple(i->inds(T)[Lpos[i]],Val(NL))) + Rinds = similar_type(IndsT,Val(NR))(ntuple(i->inds(T)[Rpos[i]],Val(NR))) + + # Use sim to create "similar" indices, in case + # the indices have identifiers. If not this should + # act as an identity operator + Rinds_sim = sim(Rinds) + + Uinds = unioninds(Linds,Rinds_sim) + Pinds = unioninds(Rinds_sim,Rinds) + + U = reshape(UM,Uinds) + P = reshape(PM,Pinds) + return U,P +end + +function polar(T::DenseTensor{ElT,2,IndsT}; + kwargs...) where {ElT,IndsT} + QM,RM = polar(Matrix(T)) + dim = size(QM,2) + # Make the new indices to go onto Q and R + q = eltype(IndsT)(dim) + Qinds = IndsT((ind(T,1),q)) + Rinds = IndsT((q,ind(T,2))) + Q = Tensor(Dense{ElT}(vec(Matrix(QM))),Qinds) + R = Tensor(Dense{ElT}(vec(Matrix(RM))),Rinds) + return Q,R +end diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index 52a81d3022..0b2a4c76c5 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -29,7 +29,9 @@ dim(ds::Dims) = prod(ds) # code (it helps to construct a Tuple(::NTuple{N,Int}) where the # only known thing for dispatch is a concrete type such # as Dims{4}) -base_type(::Type{T}) where {T<:Dims} = Dims +StaticArrays.similar_type(::Type{IndsT},::Val{N}) where {IndsT<:Dims,N} = Dims{N} + +unioninds(is1::Dims{N1},is2::Dims{N2}) where {N1,N2} = Dims{N1+N2}(is1...,is2...) 
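+# Editor's sketch of how these overloads get used (hypothetical REPL
+# session, not part of the patch):
+#
+#   similar_type(Dims{2},Val(3))  # -> Dims{3}
+#   unioninds((2,3),(4,))         # -> (2,3,4)
+#
+# so decomposition code can build result index types generically, whether
+# the indices are an IndexSet or a plain Dims tuple.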
# The size is obtained from the indices dims(T::Tensor) = dims(inds(T)) @@ -38,25 +40,25 @@ Base.size(T::Tensor) = dims(T) Base.copy(T::Tensor) = Tensor(copy(store(T)),copy(inds(T))) -function Base.similar(T::Type{Tensor{ElT,N,StoreT,IndsT}},dims) where {ElT,N,StoreT,IndsT} +function Base.similar(::Type{T},dims) where {T<:Tensor{<:Any,<:Any,StoreT}} where {StoreT} return Tensor(similar(StoreT,dims),dims) end +# TODO: make sure these are implemented correctly #Base.similar(T::Type{<:Tensor},::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) #Base.similar(T::Type{<:Tensor},::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) -# TODO: implement these versions -Base.similar(T::Tensor) = Tensor(similar(store(T)),inds(T)) +Base.similar(T::Tensor) = Tensor(similar(store(T)),copy(inds(T))) Base.similar(T::Tensor,dims) = Tensor(similar(store(T),dims),dims) # To handle method ambiguity with AbstractArray Base.similar(T::Tensor,dims::Dims) = Tensor(similar(store(T),dims),dims) -Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),inds(T)) +Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T),S),copy(inds(T))) Base.similar(T::Tensor,::Type{S},dims) where {S} = Tensor(similar(store(T),S),dims) # To handle method ambiguity with AbstractArray Base.similar(T::Tensor,::Type{S},dims::Dims) where {S} = Tensor(similar(store(T),S),dims) #function Base.convert(::Type{Tensor{<:Number,N,StoreR,Inds}}, # T::Tensor{<:Number,N,<:Any,Inds}) where {N,Inds,StoreR} -# return Tensor(convert(StoreR,store(T)),inds(T)) +# return Tensor(convert(StoreR,store(T)),copy(inds(T))) #end function Base.promote_rule(::Type{Tensor{ElT1,N,StoreT1,Inds}}, @@ -80,8 +82,6 @@ find_tensor(x) = x find_tensor(a::Tensor, rest) = a find_tensor(::Any, rest) = find_tensor(rest) -#Base.similar(T::Tensor) = Tensor(similar(store(T)),copy(inds(T))) -#Base.similar(T::Tensor,::Type{S}) where {S} = Tensor(similar(store(T)),copy(inds(T))) - +# TODO: implement some generic fallbacks for necessary parts of the API? #Base.getindex(A::TensorT, i::Int) where {TensorT<:Tensor} = error("getindex not yet implemented for Tensor type $TensorT") diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl index c58f6857dd..f1e2ae203d 100644 --- a/test/test_itensor_dense.jl +++ b/test/test_itensor_dense.jl @@ -470,15 +470,24 @@ end #Note: this is only satisfied when left dimensions #are greater than right dimensions UUᵀ = U*dag(prime(U,u)) - for ii ∈ dim(u[1]), jj ∈ dim(u[2]) - @test UUᵀ[u[1](ii),u[2](jj),u[1]'(ii),u[2]'(jj)]≈one(SType) atol=1e-14 + + # TODO: use a combiner to combine the u indices to make + # this test simpler + for ii ∈ 1:dim(u[1]), jj ∈ 1:dim(u[2]), iip ∈ 1:dim(u[1]), jjp ∈ 1:dim(u[2]) + val = UUᵀ[u[1](ii),u[2](jj),u[1]'(iip),u[2]'(jjp)] + if ii==iip && jj==jjp + @test val≈one(SType) atol=1e-14 + else + @test val≈zero(SType) atol=1e-14 + end end end - @testset "Test eigen decomposition of an ITensor" begin - T = randomITensor(i,j,i',j') + @testset "Test Hermitian eigendecomposition of an ITensor" begin + is = IndexSet(i,j) + T = randomITensor(is...,prime(is)...) 
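+    # adding swapprime(dag(T),0,1) below makes T Hermitian with respect
+    # to the unprimed/primed index pairing (editor's note)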
T = T + swapprime(dag(T),0,1)
-    U,D,u = eigen(T)
+    U,D,u = eigenHermitian(T)
     @test T≈U*D*prime(dag(U))
     UUᴴ = U*prime(dag(U),u)
     @test UUᴴ ≈ δ(u,u') atol=1e-14
   end
+
 end # End ITensor factorization testset
+
 end # End Dense storage test
 
 end # End Dense ITensor basic functionality

From ffec17a111bb501d2d1ff4917fa04787b5997d94 Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Mon, 9 Sep 2019 17:16:34 -0400
Subject: [PATCH 07/27] More progress on DiagTensor

---
 src/indexset.jl      |   4 +
 src/itensor.jl       |  20 ++--
 src/tensor/dense.jl  |  43 ++++++---
 src/tensor/diag.jl   | 221 +++++++++++++++++++++++++++----------
 src/tensor/tensor.jl |   2 +
 5 files changed, 189 insertions(+), 101 deletions(-)

diff --git a/src/indexset.jl b/src/indexset.jl
index 05169d91d3..f1296456ad 100644
--- a/src/indexset.jl
+++ b/src/indexset.jl
@@ -578,6 +578,10 @@ function getperm(set1, set2)
   return p
 end
 
+function getperm(s1::IndexSet{N},s2::IndexSet{N}) where {N}
+  return ntuple(i->findfirst(==(s2[i]),s1),Val(N))
+end
+
 """
 getperms(col1,col2,col3)
 
diff --git a/src/itensor.jl b/src/itensor.jl
index 07324a4df6..fdaf0adc1a 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -24,8 +24,9 @@ mutable struct ITensor{N}
   inds::IndexSet{N}
   #TODO: check that the storage is consistent with the
   #total dimension of the indices (possibly only in debug mode);
-  ITensor(st,is::IndexSet{N}) where {N} = new{N}(st,is)
+  ITensor{N}(st,is::IndexSet{N}) where {N} = new{N}(st,is)
 end
+ITensor(st,is::IndexSet{N}) where {N} = ITensor{N}(st,is)
 
 inds(T::ITensor) = T.inds
 store(T::ITensor) = T.store
@@ -39,11 +40,13 @@ setstore!(T::ITensor,st::TensorStorage) = (T.store = st)
 #
 
 ITensor(T::Tensor) = ITensor(store(T),inds(T))
+ITensor{N}(T::Tensor{<:Number,N}) where {N} = ITensor(store(T),inds(T))
 
 ITensor() = ITensor(Dense{Nothing}(),IndexSet())
 ITensor(is::IndexSet) = ITensor(Float64,is...)
 ITensor(inds::Index...) = ITensor(IndexSet(inds...))
 
+# TODO: add versions where the types can be specified
 Tensor(A::ITensor) = Tensor(store(A),inds(A))
 
 function ITensor(::Type{T},
@@ -83,7 +86,7 @@ The storage will have Diag type.
 """
 function diagITensor(::Type{T},
                      is::IndexSet) where {T<:Number}
-  return ITensor(Diag{Vector{T}}(zero(T),minDim(is)),is)
+  return ITensor(Diag{Vector{T}}(zeros(T,minDim(is))),is)
 end
 
 """
@@ -129,7 +132,7 @@ Make a sparse ITensor of element type Float64 with non-zero elements
 only along the diagonal. Defaults to storing zeros along the diagonal.
 The storage will have Diag type.
 """
-diagITensor(is::IndexSet) = ITensor(Diag{Vector{Float64}}(zero(Float64),minDim(is)),is)
+diagITensor(is::IndexSet) = diagITensor(Float64,is)
 
 """
 diagITensor(is::Index...)
@@ -150,7 +153,7 @@ The storage will have Diag type.
 """
 function diagITensor(x::T,
                      is::IndexSet) where {T<:Number}
-  return ITensor(Diag{Vector{float(T)}}(float(x),minDim(is)),is)
+  return ITensor(Diag{Vector{float(T)}}(fill(float(x),minDim(is))),is)
 end
 
 """
@@ -239,8 +242,9 @@ Base.size(A::ITensor{N}, d::Int) where {N} = d in 1:N ? dim(inds(A)[d]) :
 
 isNull(T::ITensor) = (eltype(T) === Nothing)
 
-Base.copy(T::ITensor) = ITensor(copy(Tensor(T)))
+Base.copy(T::ITensor{N}) where {N} = ITensor{N}(copy(Tensor(T)))::ITensor{N}
 
+# TODO: make versions where the element type can be specified
 Base.Array(T::ITensor) = Array(Tensor(T))
 Base.Matrix(T::ITensor{N}) where {N} = (N==2 ?
Array(Tensor(T)) : throw(DimensionMismatch("ITensor must be order 2 to convert to a Matrix"))) @@ -490,8 +494,12 @@ add!(R::ITensor,T::ITensor) = add!(R,1,T) function add!(R::ITensor{N},α::Number,T::ITensor{N}) where {N} perm = getperm(inds(R),inds(T)) TR,TT = Tensor(R),Tensor(T) + + # TODO: make this into a permutedims!?(Tensor,Tensor,perm,f) function? + # Include type promotion from α TR = convert(promote_type(typeof(TR),typeof(TT)),TR) - TR = permutedims!(TR,TT,perm,(r,t)->r+α*t) + TR = permutedims!!(TR,TT,perm,(r,t)->r+α*t) + setstore!(R,store(TR)) setinds!(R,inds(TR)) return R diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 79cf953e43..3524a8cdf4 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -6,7 +6,7 @@ export Dense struct Dense{T} <: TensorStorage data::Vector{T} - Dense{T}(data::Vector) where {T} = new{T}(convert(Vector{T},data)) + Dense{T}(data) where {T} = new{T}(convert(Vector{T},data)) Dense{T}() where {T} = new{T}(Vector{T}()) end @@ -22,12 +22,15 @@ Base.similar(::Type{Dense{T}},dims) where {T} = Dense{T}(similar(Vector{T},dim(d Base.similar(D::Dense,::Type{T}) where {T} = Dense{T}(similar(data(D),T)) Base.copy(D::Dense{T}) where {T} = Dense{T}(copy(data(D))) +# convert to complex +# TODO: this could be a generic TensorStorage function +Base.complex(D::Dense{T}) where {T} = Dense{complex(T)}(complex(data(D))) + Base.eltype(::Dense{T}) where {T} = eltype(T) # This is necessary since for some reason inference doesn't work # with the more general definition (eltype(Nothing) === Any) Base.eltype(::Dense{Nothing}) = Nothing Base.eltype(::Type{Dense{T}}) where {T} = eltype(T) -#Base.length(D::Dense) = length(data(D)) Base.promote_rule(::Type{Dense{T1}},::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} Base.convert(::Type{Dense{R}},D::Dense) where {R} = Dense{R}(convert(Vector{R},data(D))) @@ -43,9 +46,8 @@ Base.IndexStyle(::Type{TensorT}) where {TensorT<:DenseTensor} = IndexLinear() Base.getindex(T::DenseTensor,i::Integer) = store(T)[i] Base.setindex!(T::DenseTensor,v,i::Integer) = (store(T)[i] = v) -function Base.convert(::Type{Tensor{ElR,NR,StoreR,IndsR}}, - T::Tensor{ElT,NT,StoreT,IndsT}) where {ElR,NR,StoreR<:Dense,IndsR, - ElT,NT,StoreT<:Dense,IndsT} +function Base.convert(::Type{Tensor{<:Number,<:Any,StoreR}}, + T::DenseTensor{<:Number,<:Any}) where {StoreR<:Dense} return Tensor(convert(StoreR,store(T)),inds(T)) end @@ -63,17 +65,29 @@ end # Create an Array that is a view of the Dense Tensor # Useful for using Base Array functions Base.Array(T::DenseTensor) = reshape(data(store(T)),dims(inds(T))) -Base.Vector(T::DenseTensor) = vec(Array(T)) +Base.Matrix(T::DenseTensor{<:Number,2}) = Array(T) +Base.Vector(T::DenseTensor{<:Number,1}) = Array(T) + +# TODO: call permutedims!(R,T,perm,(r,t)->t)? +function Base.permutedims!(R::DenseTensor{<:Number,N}, + T::DenseTensor{<:Number,N}, + perm::NTuple{N,Int}) where {N} + permutedims!(Array(R),Array(T),perm) + return R +end -function Base.permutedims!(T1::DenseTensor{<:Number,N}, - T2::DenseTensor{<:Number,N}, - perm) where {N} - permutedims!(Array(T1),Array(T2),perm) - return T1 +# Version that may overwrite the result or promote +# and return the result +function permutedims!!(R::DenseTensor{<:Number,N}, + T::DenseTensor{<:Number,N}, + perm::NTuple{N,Int},f=(r,t)->t) where {N} + permutedims!(R,T,perm,f) + return R end -function Base.permutedims(T::DenseTensor, - perm) +# TODO: move to tensor.jl? 
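+# (Editor's note: the method below only relies on similar(T,inds) and
+# permutedims!, not on Dense storage specifically, hence the TODO above.)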
+function Base.permutedims(T::Tensor{<:Number,N}, + perm::NTuple{N,Int}) where {N} Tp = similar(T,permute(inds(T),perm)) permutedims!(Tp,T,perm) return Tp @@ -242,9 +256,8 @@ end @inline tuplejoin(x, y) = (x..., y...) @inline tuplejoin(x, y, z...) = (x..., tuplejoin(y, z...)...) -# TODO: write this function """ -combinedims(T::Tensor,pos) +permute_reshape(T::Tensor,pos) Takes a permutation that is split up into tuples. Index positions within the tuples are combined. diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl index 7c2451df6b..a4cbff168a 100644 --- a/src/tensor/diag.jl +++ b/src/tensor/diag.jl @@ -2,62 +2,99 @@ export Diag # Diag can have either Vector storage, in which case # it is a general Diag tensor, or scalar storage, -# in which case it has a uniform value -mutable struct Diag{T} <: TensorStorage +# in which case the diagonal has a uniform value +struct Diag{T} <: TensorStorage data::T - Diag{T}(data::Vector) where {T<:AbstractVector} = new{T}(data) - Diag{T}(size::Integer) where {T<:AbstractVector{S}} where S = new{T}(Vector{S}(undef,size)) - Diag{T}(x::Number,size::Integer) where {T<:AbstractVector} = new{T}(fill(convert(eltype(T),x),size)) + Diag{T}(data) where {T} = new{T}(data) + #Diag{T}(data) where {T<:Number} = new{T}(convert(T,x)) + #Diag{T}(size::Integer) where {T<:AbstractVector{S}} where S = new{T}(Vector{S}(undef,size)) + #Diag{T}(x::Number,size::Integer) where {T<:AbstractVector} = new{T}(fill(convert(eltype(T),x),size)) #Diag{T}() where {T} = new{T}(Vector{T}()) # Make a uniform Diag storage - Diag{T}(x::Number) where {T<:Number} = new{T}(convert(T,x)) # Determine the storage type parameter from the input - Diag(data::T) where T = new{T}(data) + #Diag(data::T) where T = new{T}(data) end data(D::Diag) = D.data + +const NonuniformDiag{T} = Diag{T} where {T<:AbstractVector} +const UniformDiag{T} = Diag{T} where {T<:Number} + +Base.getindex(D::NonuniformDiag,i::Int)= data(D)[i] +Base.getindex(D::UniformDiag,i::Int) = data(D) + +Base.setindex!(D::Diag,val,i::Int)= (data(D)[i] = val) +Base.setindex!(D::UniformDiag,val,i::Int)= error("Cannot set elements of a uniform Diag storage") + +# convert to complex +# TODO: this could be a generic TensorStorage function +Base.complex(D::Diag{T}) where {T} = Diag{complex(T)}(complex(data(D))) + +Base.copy(D::Diag{T}) where {T} = Diag{T}(copy(data(D))) + Base.eltype(::Diag{T}) where {T} = eltype(T) Base.eltype(::Type{Diag{T}}) where {T} = eltype(T) -getindex(D::Diag{T},i::Int) where {T<:AbstractVector}= data(D)[i] -# Version of getindex for uniform (scalar) storage -getindex(D::Diag{T},i::Int) where {T<:Number} = data(D) -*(D::Diag{T},x::S) where {T<:AbstractVector,S<:Number} = Dense{promote_type(eltype(D),S)}(x*data(D)) -*(x::Number,D::Diag) = D*x + +# Deal with uniform Diag conversion +Base.convert(::Type{Diag{T}},D::Diag) where T = Diag{T}(data(D)) + +#*(D::Diag{T},x::S) where {T<:AbstractVector,S<:Number} = Dense{promote_type(eltype(D),S)}(x*data(D)) +#*(x::Number,D::Diag) = D*x # # Type promotions involving Diag # Useful for knowing how conversions should work when adding and contracting # -Base.promote_type(::Type{Diag{T1}},::Type{Diag{T2}}) where {T1<:AbstractVector,T2<:AbstractVector} = Diag{promote_type(T1,T2)} +Base.promote_rule(::Type{Diag{T1}},::Type{Diag{T2}}) where + {T1,T2} = Diag{promote_type(T1,T2)} -Base.promote_type(::Type{Diag{T1}},::Type{Diag{T2}}) where {T1<:Number,T2<:Number} = Diag{promote_type(T1,T2)} +# TODO: how do we make this work more generally for T2<:AbstractVector{S2}? 
+# Make a similar_type(AbstractVector{S2},T1) -> AbstractVector{T1} function? +Base.promote_rule(::Type{UniformDiag{T1}},::Type{NonuniformDiag{T2}}) where + {T1,T2<:Vector{S2}} where S2 = Diag{Vector{promote_type(T1,S2)}} -Base.promote_type(::Type{Diag{T1}},::Type{Diag{T2}}) where {T1<:Number,T2<:Vector{S2}} where S2 = Diag{Vector{promote_type(T1,S2)}} +Base.promote_rule(::Type{Dense{T1}},::Type{Diag{T2}}) where + {T1,T2} = Dense{promote_type(T1,eltype(T2))} -Base.promote_type(::Type{Diag{T1}},::Type{Diag{T2}}) where {T1<:Vector{S1},T2<:Number} where S1 = promote_type(Diag{T2},Diag{T1}) +const DiagTensor{El,N,Inds} = Tensor{El,N,<:Diag,Inds} +const NonuniformDiagTensor{El,N,Inds} = Tensor{El,N,<:NonuniformDiag,Inds} +const UniformDiagTensor{El,N,Inds} = Tensor{El,N,<:UniformDiag,Inds} -Base.promote_type(::Type{Dense{T1}},::Type{Diag{T2}}) where {T1<:Number,T2<:Vector{S2}} where S2 = Dense{promote_type(T1,S2)} +Base.IndexStyle(::Type{TensorT}) where {TensorT<:DiagTensor} = IndexCartesian() -Base.promote_type(::Type{Dense{T1}},::Type{Diag{T2}}) where {T1<:Number,T2<:Number} = Dense{promote_type(T1,T2)} +# TODO: this needs to be better (promote element type, check order compatibility, +# etc. +function Base.convert(::Type{TensorT}, T::DiagTensor{ElT,N,IndsT}) where + {TensorT<:Tensor{ElT,N,<:Dense,IndsT}} where {ElT,N,IndsT} + return dense(T) +end -Base.promote_type(::Type{Diag{T1}},::Type{Dense{T2}}) where {T1,T2} = promote_type(Dense{T2},Diag{T1}) +diag_length(T::DiagTensor) = minimum(dims(T)) -# TODO: define length for Diag, should this be -# the length of the diagonal -# or the product of the tensor dimensions? -# How is the length defined for scalar storage? -length(D::Diag{T}) where {T<:AbstractVector} = length(data(D)) +function Base.getindex(T::DiagTensor{ElT,N},inds::Vararg{Int,N}) where {ElT,N} + if all(==(inds[1]),inds) + return store(T)[inds[1]] + else + return zero(eltype(ElT)) + end +end -# convert to Dense -function storage_dense(D::Diag,is::IndexSet) - return Dense{eltype(D)}(vec(storage_convert(Array,D,is))) +# Set diagonal elements +# Throw error for off-diagonal +function Base.setindex!(T::DiagTensor{<:Number,N},val,inds::Vararg{Int,N}) where {N} + all(==(inds[1]),inds) || error("Cannot set off-diagonal element of Diag storage") + return store(T)[inds[1]] = val end -# convert to complex -storage_complex(D::Diag) = Diag(complex(data(D))) +function Base.setindex!(T::UniformDiagTensor{<:Number,N},val,inds::Vararg{Int,N}) where {N} + error("Cannot set elements of a uniform Diag storage") +end -copy(D::Diag{T}) where {T} = Diag{T}(copy(data(D))) +# convert to Dense +function dense(D::DiagTensor) + return Tensor(Dense{eltype(D)}(vec(Array(D))),inds(D)) +end # TODO: implement this in a sparse way # For now, we will just make them dense since the output is dense anyway @@ -76,53 +113,51 @@ function storage_convert(::Type{Array},D::Diag,is::IndexSet) return A end -Base.convert(::Type{Diag{T}},D::Diag) where T = Diag{T}(data(D)) - -function storage_convert(::Type{Dense{T}},D::Diag,is::IndexSet) where T - return Dense{T}(vec(storage_convert(Array,D,is))) -end +#function storage_convert(::Type{Dense{T}},D::Diag,is::IndexSet) where T +# return Dense{T}(vec(storage_convert(Array,D,is))) +#end storage_fill!(D::Diag,x::Number) = fill!(data(D),x) -function diag_getindex(Tstore::Diag{<:AbstractVector}, - val::Int) - return getindex(data(Tstore),val) -end +#function diag_getindex(Tstore::Diag{<:AbstractVector}, +# val::Int) +# return getindex(data(Tstore),val) +#end # Uniform case -function 
diag_getindex(Tstore::Diag{<:Number}, - val::Int) - return data(Tstore) -end +#function diag_getindex(Tstore::Diag{<:Number}, +# val::Int) +# return data(Tstore) +#end # Get diagonal elements # Gives zero for off-diagonal elements -function storage_getindex(Tstore::Diag{T}, - Tis::IndexSet, - vals::Union{Int, AbstractVector{Int}}...) where {T} - if all(==(vals[1]),vals) - return diag_getindex(Tstore,vals[1]) - else - return zero(eltype(T)) - end -end +#function storage_getindex(Tstore::Diag{T}, +# Tis::IndexSet, +# vals::Union{Int, AbstractVector{Int}}...) where {T} +# if all(==(vals[1]),vals) +# return diag_getindex(Tstore,vals[1]) +# else +# return zero(eltype(T)) +# end +#end # Set diagonal elements # Throw error for off-diagonal -function storage_setindex!(Tstore::Diag{<:AbstractVector}, - Tis::IndexSet, - x::Union{<:Number, AbstractArray{<:Number}}, - vals::Union{Int, AbstractVector{Int}}...) - all(y->y==vals[1],vals) || error("Cannot set off-diagonal element of Diag storage") - return setindex!(data(Tstore),x,vals[1]) -end - -function storage_setindex!(Tstore::Diag{<:Number}, - Tis::IndexSet, - x::Union{<:Number, AbstractArray{<:Number}}, - vals::Union{Int, AbstractVector{Int}}...) - error("Cannot set elements of a uniform Diag storage") -end +#function storage_setindex!(Tstore::Diag{<:AbstractVector}, +# Tis::IndexSet, +# x::Union{<:Number, AbstractArray{<:Number}}, +# vals::Union{Int, AbstractVector{Int}}...) +# all(y->y==vals[1],vals) || error("Cannot set off-diagonal element of Diag storage") +# return setindex!(data(Tstore),x,vals[1]) +#end +# +#function storage_setindex!(Tstore::Diag{<:Number}, +# Tis::IndexSet, +# x::Union{<:Number, AbstractArray{<:Number}}, +# vals::Union{Int, AbstractVector{Int}}...) +# error("Cannot set elements of a uniform Diag storage") +#end # Add generic Diag's in-place function _add!(Bstore::Diag, @@ -225,23 +260,49 @@ function storage_mult(Astore::Diag{T}, return Diag(Bdata) end -# TODO: make this a special version of storage_add!() -# This shouldn't do anything to the data -function storage_permute!(Bstore::Diag, - Bis::IndexSet, - Astore::Diag, - Ais::IndexSet) - p = calculate_permutation(Bis,Ais) - Adata = data(Astore) - Bdata = data(Bstore) - if is_trivial_permutation(p) - Bdata .= Adata - else - reshapeBdata = reshape(Bdata,dims(Bis)) - permutedims!(reshapeBdata,reshape(Adata,dims(Ais)),p) +function Base.permutedims!(R::DiagTensor{<:Number,N}, + T::DiagTensor{<:Number,N}, + perm::NTuple{N,Int},f::Function=(r,t)->t) where {N} + for i=1:diag_length(R) + diag_inds = CartesianIndex{N}(ntuple(_->i,Val(N))) + @inbounds R[diag_inds] = f(R[diag_inds],T[diag_inds]) end + return R +end + +# Version that may overwrite in-place or may return the result +function permutedims!!(R::NonuniformDiagTensor{<:Number,N}, + T::NonuniformDiagTensor{<:Number,N}, + perm::NTuple{N,Int},f::Function=(r,t)->t) where {N} + permutedims!(R,T,perm,f) + return R +end + +function permutedims!!(R::UniformDiagTensor{ElR,N}, + T::UniformDiagTensor{ElT,N}, + perm::NTuple{N,Int},f::Function=(r,t)->t) where {ElR,ElT,N} + diag_inds = CartesianIndex(ntuple(_->1,Val(N))) + R = Tensor(Diag{promote_type(ElR,ElT)}(R[diag_inds]+T[diag_inds]),inds(R)) + return R end +# TODO: make this a special version of storage_add!() +# This shouldn't do anything to the data +#function storage_permute!(Bstore::Diag, +# Bis::IndexSet, +# Astore::Diag, +# Ais::IndexSet) +# p = calculate_permutation(Bis,Ais) +# Adata = data(Astore) +# Bdata = data(Bstore) +# if is_trivial_permutation(p) +# Bdata .= Adata +# 
else +# reshapeBdata = reshape(Bdata,dims(Bis)) +# permutedims!(reshapeBdata,reshape(Adata,dims(Ais)),p) +# end +#end + # TODO: move this to tensorstorage.jl? function storage_dag(Astore::Diag,Ais::IndexSet) return dag(Ais),storage_conj(Astore) diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index 0b2a4c76c5..410a9d8386 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -40,6 +40,8 @@ Base.size(T::Tensor) = dims(T) Base.copy(T::Tensor) = Tensor(copy(store(T)),copy(inds(T))) +Base.complex(T::Tensor) = Tensor(complex(store(T)),copy(inds(T))) + function Base.similar(::Type{T},dims) where {T<:Tensor{<:Any,<:Any,StoreT}} where {StoreT} return Tensor(similar(StoreT,dims),dims) end From 685d94d7a429fa98d5b02c8ed749a83243b7afb6 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Tue, 10 Sep 2019 16:40:10 -0400 Subject: [PATCH 08/27] Fix DenseTensor tests that had gotten broken --- src/index.jl | 4 +- src/indexset.jl | 38 ++++----- src/itensor.jl | 14 +--- src/tensor/dense.jl | 64 ++++++++------- src/tensor/diag.jl | 161 +++++++++++++++++++++++-------------- src/tensor/tensor.jl | 4 + test/test_itensor_dense.jl | 2 +- 7 files changed, 161 insertions(+), 126 deletions(-) diff --git a/src/index.jl b/src/index.jl index b3242097f8..2cd92b8532 100644 --- a/src/index.jl +++ b/src/index.jl @@ -171,8 +171,8 @@ struct IndexVal return new(i,n) end end -getindex(i::Index, j::Int) = IndexVal(i, j) -getindex(i::Index, c::Colon) = [IndexVal(i, j) for j in 1:dim(i)] +Base.getindex(i::Index, j::Int) = IndexVal(i, j) +Base.getindex(i::Index, c::Colon) = [IndexVal(i, j) for j in 1:dim(i)] (i::Index)(n::Int) = IndexVal(i,n) diff --git a/src/indexset.jl b/src/indexset.jl index f1296456ad..6ce95fa060 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -24,6 +24,8 @@ struct IndexSet{N} inds::MVector{N,Index} IndexSet{N}(inds::MVector{N,Index}) where {N} = new{N}(inds) IndexSet{N}(inds::NTuple{N,Index}) where {N} = new{N}(inds) + IndexSet{0}() = new{0}(()) + IndexSet{0}(::Tuple{}) = new{0}(()) end IndexSet(inds::MVector{N,Index}) where {N} = IndexSet{N}(inds) IndexSet(inds::NTuple{N,Index}) where {N} = IndexSet{N}(inds) @@ -31,14 +33,20 @@ IndexSet(inds::NTuple{N,Index}) where {N} = IndexSet{N}(inds) inds(is::IndexSet) = is.inds # Empty constructor -IndexSet() = IndexSet(()) +IndexSet() = IndexSet{0}() +IndexSet(::Tuple{}) = IndexSet() # Construct of some size IndexSet(::Val{N}) where {N} = IndexSet(ntuple(_->Index(),Val(N))) # Construct from various sets of indices -IndexSet(inds::Vararg{Index,N}) where {N} = IndexSet{N}(NTuple{N,Index}(inds)) IndexSet{N}(inds::Vararg{Index,N}) where {N} = IndexSet{N}(NTuple{N,Index}(inds)) +IndexSet(inds::Vararg{Index,N}) where {N} = IndexSet{N}(inds...) 
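+# Illustrative usage (i and j stand for any existing Index objects; this is
+# an editor's sketch, not part of the patch):
+#   is = IndexSet(i,j)   # constructs an IndexSet{2}
+#   dims(is)             # == (dim(i),dim(j))
+#   dim(is)              # == dim(i)*dim(j)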
+ +IndexSet{N}(ivs::NTuple{N,IndexVal}) where {N} = IndexSet{N}(ntuple(i->ind(ivs[i]),Val(N))) +IndexSet(ivs::NTuple{N,IndexVal}) where {N} = IndexSet{N}(ivs) +IndexSet{N}(ivs::Vararg{IndexVal,N}) where {N} = IndexSet{N}(tuple(ivs...)) +IndexSet(ivs::Vararg{IndexVal,N}) where {N} = IndexSet{N}(tuple(ivs...)) # Construct from various sets of IndexSets IndexSet(inds::IndexSet) = inds @@ -560,26 +568,12 @@ getperm(col1,col2) Get the permutation that takes collection 2 to collection 1, such that col2[p].==col1 """ -function getperm(set1, set2) - # TODO: use perm = ntuple(i->findfirst(==(set2[i]),set1),Val(N)) - l1 = length(set1) - l2 = length(set2) - l1==l2 || throw(DimensionMismatch("Mismatched input sizes in calcPerm: l1=$l1, l2=$l2")) - p = zeros(Int,l1) - for i1 = 1:l1 - for i2 = 1:l2 - if set1[i1]==set2[i2] - p[i1] = i2 - break - end - end #i2 - p[i1]!=0 || error("Sets aren't permutations of each other") - end #i1 - return p -end - -function getperm(s1::IndexSet{N},s2::IndexSet{N}) where {N} - return ntuple(i->findfirst(==(s2[i]),s1),Val(N)) +function getperm(s1,s2) + return _getperm(IndexSet(s1),IndexSet(s2)) +end + +function _getperm(s1::IndexSet{N},s2::IndexSet{N}) where {N} + return ntuple(i->findfirst(==(s1[i]),s2),Val(N)) end """ diff --git a/src/itensor.jl b/src/itensor.jl index fdaf0adc1a..04552c73fb 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -269,7 +269,7 @@ Base.getindex(T::ITensor{N},vals::Vararg{Int,N}) where {N} = Tensor(T)[vals...] function Base.getindex(T::ITensor{N},ivs::Vararg{IndexVal,N}) where {N} p = getperm(inds(T),ivs) - vals = val.(ivs)[p] + vals = permute(val.(ivs),p) return T[vals...] end @@ -286,7 +286,7 @@ Base.setindex!(T::ITensor{N},x::Number,vals::Vararg{Int,N}) where {N} = (Tensor( function Base.setindex!(T::ITensor,x::Number,ivs::IndexVal...) p = getperm(inds(T),ivs) - vals = val.(ivs)[p] + vals = permute(val.(ivs),p) return T[vals...] = x end @@ -358,15 +358,7 @@ settags(A::ITensor,vargs...) = ITensor(store(A),settags(inds(A))) swaptags(A::ITensor,vargs...) = ITensor(store(A),swaptags(inds(A),vargs...)) # TODO: implement in a better way (more generically for other storage) -function Base.:(==)(A::ITensor,B::ITensor) - !hassameinds(A,B) && return false - #IndexVal.(inds(A),Tuple(i)) - p = getperm(inds(B),inds(A)) - for i ∈ CartesianIndices(dims(A)) - A[Tuple(i)...] ≠ B[Tuple(i)[p]...] 
&& return false - end - return true -end +Base.:(==)(A::ITensor,B::ITensor) = (norm(A-B) == zero(promote_type(eltype(A),eltype(B)))) function Base.isapprox(A::ITensor, B::ITensor; diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 3524a8cdf4..ce8ff506f6 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -46,8 +46,16 @@ Base.IndexStyle(::Type{TensorT}) where {TensorT<:DenseTensor} = IndexLinear() Base.getindex(T::DenseTensor,i::Integer) = store(T)[i] Base.setindex!(T::DenseTensor,v,i::Integer) = (store(T)[i] = v) -function Base.convert(::Type{Tensor{<:Number,<:Any,StoreR}}, - T::DenseTensor{<:Number,<:Any}) where {StoreR<:Dense} +Base.strides(T::DenseTensor) = strides(inds(T)) + +# Needed for passing Tensor{T,2} to BLAS +function Base.unsafe_convert(::Type{Ptr{ElT}},T::DenseTensor{ElT}) where {ElT} + return Base.unsafe_convert(Ptr{ElT},data(store(T))) +end + +function Base.convert(::Type{TensorR}, + T::DenseTensor{ElT,N}) where {TensorR<:Tensor{ElR,N,StoreR}} where + {ElR,ElT,N,StoreR<:Dense} return Tensor(convert(StoreR,store(T)),inds(T)) end @@ -185,50 +193,48 @@ function _contract!(R::DenseTensor,labelsR, T2::DenseTensor,labelsT2) props = ContractionProperties(labelsT1,labelsT2,labelsR) compute_contraction_properties!(props,T1,T2,R) - _contract_dense_dense!(Array(R),props,Array(T1),Array(T2)) + _contract!(R,T1,T2,props) return R end -# TODO: move this directly into contract? Or write the signature -# in terms of Tensor (so that it can be used in block sparse contractions) -function _contract_dense_dense!(C::Array{T}, - p::ContractionProperties, - A::Array{T}, - B::Array{T}, - α::T=one(T), - β::T=zero(T)) where {T} +function _contract!(C::DenseTensor{T}, + A::DenseTensor{T}, + B::DenseTensor{T}, + props::ContractionProperties, + α::T=one(T), + β::T=zero(T)) where {T} tA = 'N' - if p.permuteA - aref = reshape(permutedims(A,p.PA),p.dmid,p.dleft) + if props.permuteA + aref = reshape(permutedims(A,props.PA),props.dmid,props.dleft) tA = 'T' else #A doesn't have to be permuted - if Atrans(p) - aref = reshape(A,p.dmid,p.dleft) + if Atrans(props) + aref = reshape(A,props.dmid,props.dleft) tA = 'T' else - aref = reshape(A,p.dleft,p.dmid) + aref = reshape(A,props.dleft,props.dmid) end end tB = 'N' - if p.permuteB - bref = reshape(permutedims(B,p.PB),p.dmid,p.dright) + if props.permuteB + bref = reshape(permutedims(B,props.PB),props.dmid,props.dright) else - if Btrans(p) - bref = reshape(B,p.dright,p.dmid) + if Btrans(props) + bref = reshape(B,props.dright,props.dmid) tB = 'T' else - bref = reshape(B,p.dmid,p.dright) + bref = reshape(B,props.dmid,props.dright) end end # TODO: this logic may be wrong - if p.permuteC - cref = reshape(copy(C),p.dleft,p.dright) + if props.permuteC + cref = reshape(copy(C),props.dleft,props.dright) else - if Ctrans(p) - cref = reshape(C,p.dleft,p.dright) + if Ctrans(props) + cref = reshape(C,props.dleft,props.dright) if tA=='N' && tB=='N' (aref,bref) = (bref,aref) tA = tB = 'T' @@ -237,17 +243,17 @@ function _contract_dense_dense!(C::Array{T}, tA = tB = 'N' end else - cref = reshape(C,p.dleft,p.dright) + cref = reshape(C,props.dleft,props.dright) end end #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),aref,bref,promote_type(T,Tβ)(β),cref) BLAS.gemm!(tA,tB,α,aref,bref,β,cref) - if p.permuteC - permutedims!(C,reshape(cref,p.newCrange...),p.PC) + if props.permuteC + permutedims!(C,reshape(cref,props.newCrange...),props.PC) end - return + return C end # Combine a bunch of tuples diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl index a4cbff168a..54e5832d3b 
100644 --- a/src/tensor/diag.jl +++ b/src/tensor/diag.jl @@ -36,7 +36,10 @@ Base.eltype(::Diag{T}) where {T} = eltype(T) Base.eltype(::Type{Diag{T}}) where {T} = eltype(T) # Deal with uniform Diag conversion -Base.convert(::Type{Diag{T}},D::Diag) where T = Diag{T}(data(D)) +Base.convert(::Type{StorageT},D::Diag) where {StorageT<:Diag{T}} where {T} = Diag{T}(data(D)) + +Base.similar(D::Diag{T}) where {T} = Diag{T}(similar(data(D))) +Base.similar(D::UniformDiag{T}) where {T<:Number} = Diag{T}(zero(T)) #*(D::Diag{T},x::S) where {T<:AbstractVector,S<:Number} = Dense{promote_type(eltype(D),S)}(x*data(D)) #*(x::Number,D::Diag) = D*x @@ -65,8 +68,8 @@ Base.IndexStyle(::Type{TensorT}) where {TensorT<:DiagTensor} = IndexCartesian() # TODO: this needs to be better (promote element type, check order compatibility, # etc. -function Base.convert(::Type{TensorT}, T::DiagTensor{ElT,N,IndsT}) where - {TensorT<:Tensor{ElT,N,<:Dense,IndsT}} where {ElT,N,IndsT} +function Base.convert(::Type{TensorT}, T::DiagTensor{ElT,N}) where + {TensorT<:Tensor{ElT,N,<:Dense}} where {ElT,N} return dense(T) end @@ -160,64 +163,64 @@ storage_fill!(D::Diag,x::Number) = fill!(data(D),x) #end # Add generic Diag's in-place -function _add!(Bstore::Diag, - Bis::IndexSet, - Astore::Diag, - Ais::IndexSet, - x::Number = 1) - # This is just used to check if the index sets - # are permutations of each other, maybe - # this should be at the ITensor level - p = calculate_permutation(Bis,Ais) - Adata = data(Astore) - Bdata = data(Bstore) - if x == 1 - Bdata .+= Adata - else - Bdata .+= x .* Adata - end -end +#function _add!(Bstore::Diag, +# Bis::IndexSet, +# Astore::Diag, +# Ais::IndexSet, +# x::Number = 1) +# # This is just used to check if the index sets +# # are permutations of each other, maybe +# # this should be at the ITensor level +# p = calculate_permutation(Bis,Ais) +# Adata = data(Astore) +# Bdata = data(Bstore) +# if x == 1 +# Bdata .+= Adata +# else +# Bdata .+= x .* Adata +# end +#end # Uniform Diag case -function _add!(Bstore::Diag{BT}, - Bis::IndexSet, - Astore::Diag{AT}, - Ais::IndexSet, - x::Number = 1) where {BT<:Number,AT<:Number} - # This is just used to check if the index sets - # are permutations of each other, maybe - # this should be at the ITensor level - p = calculate_permutation(Bis,Ais) - if x == 1 - Bstore.data += data(Astore) - else - Bstore.data += x * data(Astore) - end -end +#function _add!(Bstore::Diag{BT}, +# Bis::IndexSet, +# Astore::Diag{AT}, +# Ais::IndexSet, +# x::Number = 1) where {BT<:Number,AT<:Number} +# # This is just used to check if the index sets +# # are permutations of each other, maybe +# # this should be at the ITensor level +# p = calculate_permutation(Bis,Ais) +# if x == 1 +# Bstore.data += data(Astore) +# else +# Bstore.data += x * data(Astore) +# end +#end -function _add!(Bstore::Dense, - Bis::IndexSet, - Astore::Diag, - Ais::IndexSet, - x::Number = 1) - # This is just used to check if the index sets - # are permutations of each other - p = calculate_permutation(Bis,Ais) - Bdata = data(Bstore) - mindim = minDim(Bis) - reshapeBdata = reshape(Bdata,dims(Bis)) - if x == 1 - for ii = 1:mindim - # TODO: this should be optimized, maybe use - # strides instead of reshape? - reshapeBdata[fill(ii,order(Bis))...] += Astore[ii] - end - else - for ii = 1:mindim - reshapeBdata[fill(ii,order(Bis))...] 
+= x*Astore[ii]
+#    end
+#  end
+#end

 # TODO: implement this
 # This should not require any permutation
@@ -263,6 +266,7 @@ end
 function Base.permutedims!(R::DiagTensor{<:Number,N},
                            T::DiagTensor{<:Number,N},
                            perm::NTuple{N,Int},f::Function=(r,t)->t) where {N}
+  # TODO: check that inds(R)==permute(inds(T),perm)?
   for i=1:diag_length(R)
     diag_inds = CartesianIndex{N}(ntuple(_->i,Val(N)))
     @inbounds R[diag_inds] = f(R[diag_inds],T[diag_inds])
@@ -270,19 +274,54 @@ function Base.permutedims!(R::DiagTensor{<:Number,N},
   return R
 end

+function Base.permutedims(T::DiagTensor{<:Number,N},
+                          perm::NTuple{N,Int},f::Function=identity) where {N}
+  R = similar(T,permute(inds(T),perm))
+  permutedims!(R,T,perm,f)
+  return R
+end
+
+function Base.permutedims(T::UniformDiagTensor{ElT,N},
+                          perm::NTuple{N,Int},
+                          f::Function=identity) where {ElT,N}
+  diag_inds = CartesianIndex(ntuple(_->1,Val(N)))
+  # The element type may change under f, so take it from the result
+  val = f(T[diag_inds])
+  R = Tensor(Diag{typeof(val)}(val),permute(inds(T),perm))
+  return R
+end
+
 # Version that may overwrite in-place or may return the result
 function permutedims!!(R::NonuniformDiagTensor{<:Number,N},
                        T::NonuniformDiagTensor{<:Number,N},
-                       perm::NTuple{N,Int},f::Function=(r,t)->t) where {N}
+                       perm::NTuple{N,Int},
+                       f::Function=(r,t)->t) where {N}
   permutedims!(R,T,perm,f)
   return R
 end

 function permutedims!!(R::UniformDiagTensor{ElR,N},
                        T::UniformDiagTensor{ElT,N},
-                       perm::NTuple{N,Int},f::Function=(r,t)->t) where {ElR,ElT,N}
+                       perm::NTuple{N,Int},
+                       f::Function=(r,t)->t) where {ElR,ElT,N}
   diag_inds = CartesianIndex(ntuple(_->1,Val(N)))
-  R = Tensor(Diag{promote_type(ElR,ElT)}(R[diag_inds]+T[diag_inds]),inds(R))
+  R = Tensor(Diag{promote_type(ElR,ElT)}(f(R[diag_inds],T[diag_inds])),inds(R))
+  return R
+end
+
+function Base.permutedims!(R::DenseTensor{ElR,N},
+                           T::DiagTensor{ElT,N},
+                           perm::NTuple{N,Int},
+                           f::Function = (r,t)->t) where {ElR,ElT,N}
+  for i = 1:diag_length(T)
+    diag_inds = CartesianIndex(ntuple(_->i,Val(N)))
+    @inbounds R[diag_inds] = f(R[diag_inds],T[diag_inds])
+  end
+  return R
+end
+
+function permutedims!!(R::DenseTensor{ElR,N},
+                       T::DiagTensor{ElT,N},
+                       perm::NTuple{N,Int},f::Function=(r,t)->t) where {ElR,ElT,N}
+  permutedims!(R,T,perm,f)
   return R
 end
diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl
index 410a9d8386..a4c3dea9bd 100644
--- a/src/tensor/tensor.jl
+++ b/src/tensor/tensor.jl
@@ -25,6 +25,10 @@ ind(T::Tensor,j::Integer) = inds(T)[j]
 dims(ds::Dims) = ds
 dim(ds::Dims) = prod(ds)

+# This may be a bad idea to overload?
+# Type piracy?
+Base.strides(is::Dims) = Base.size_to_strides(1, dims(is)...)
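+# e.g. strides((2,3,4)) == (1,2,6): column-major strides, so the linear
+# offset of element (i1,i2,i3) in the flat data is
+# 1 + (i1-1)*1 + (i2-1)*2 + (i3-1)*6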
+ # This is to help with some generic programming in the Tensor # code (it helps to construct a Tuple(::NTuple{N,Int}) where the # only known thing for dispatch is a concrete type such diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl index f1e2ae203d..6fdb644478 100644 --- a/test/test_itensor_dense.jl +++ b/test/test_itensor_dense.jl @@ -5,7 +5,7 @@ using ITensors, Random.seed!(12345) -digits(::Type{T},i,j,k) where {T} = T(i*10^2+j*10+k) +digits(::Type{T},x...) where {T} = T(sum([x[length(x)-k+1]*10^(k-1) for k=1:length(x)])) @testset "Dense ITensor basic functionality" begin From b163b06e12b5d2c12d3ee08495b1a9fd2c651597 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Tue, 10 Sep 2019 17:10:19 -0400 Subject: [PATCH 09/27] Start Diag*Diag contraction --- src/tensor/dense.jl | 5 +++-- src/tensor/tensor.jl | 10 +++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index ce8ff506f6..be8f3bc52e 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -149,8 +149,9 @@ function outer(T1::DenseTensor,T2::DenseTensor) end const ⊗ = outer -function contract(T1::DenseTensor,labelsT1, - T2::DenseTensor,labelsT2) +# TODO: move to tensor.jl? +function contract(T1::Tensor,labelsT1, + T2::Tensor,labelsT2) indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) R = similar(promote_type(typeof(T1),typeof(T2)),indsR) contract!(R,labelsR,T1,labelsT1,T2,labelsT2) diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index a4c3dea9bd..e50b9e905c 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -67,8 +67,16 @@ Base.similar(T::Tensor,::Type{S},dims::Dims) where {S} = Tensor(similar(store(T) # return Tensor(convert(StoreR,store(T)),copy(inds(T))) #end +function Base.promote_rule(::Type{TensorT1}, + ::Type{TensorT2}) where + {TensorT1<:Tensor{ElT1,N1,StoreT1},TensorT2<:Tensor{ElT2,N2,StoreT2}} where + {ElT1,ElT2,N1,N2,StoreT1,StoreT2} + return Tensor{promote_type(ElT1,ElT2),N3,promote_type(StoreT1,StoreT2)} where {N3} +end + function Base.promote_rule(::Type{Tensor{ElT1,N,StoreT1,Inds}}, - ::Type{Tensor{ElT2,N,StoreT2,Inds}}) where {ElT1,ElT2,N,StoreT1,StoreT2,Inds} + ::Type{Tensor{ElT2,N,StoreT2,Inds}}) where {ElT1,ElT2,N, + StoreT1,StoreT2,Inds} return Tensor{promote_type(ElT1,ElT2),N,promote_type(StoreT1,StoreT2),Inds} end From b3c3cba1e35947f5a3f452bbf12fe2169249b9d9 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Thu, 12 Sep 2019 18:07:55 -0400 Subject: [PATCH 10/27] Get more Diag tests to pass (like Diag*Diag) --- src/indexset.jl | 33 ++++---- src/itensor.jl | 3 +- src/tensor/contraction_logic.jl | 63 ++------------ src/tensor/dense.jl | 64 +++++++++----- src/tensor/diag.jl | 144 ++++++++++++++++++++++---------- src/tensor/tensor.jl | 2 +- test/test_itensor_diag.jl | 2 +- 7 files changed, 172 insertions(+), 139 deletions(-) diff --git a/src/indexset.jl b/src/indexset.jl index 6ce95fa060..5fdfb1f2bb 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -37,7 +37,8 @@ IndexSet() = IndexSet{0}() IndexSet(::Tuple{}) = IndexSet() # Construct of some size -IndexSet(::Val{N}) where {N} = IndexSet(ntuple(_->Index(),Val(N))) +IndexSet{N}() where {N} = IndexSet{N}(ntuple(_->Index(),Val(N))) +IndexSet(::Val{N}) where {N} = IndexSet{N}() # Construct from various sets of indices IndexSet{N}(inds::Vararg{Index,N}) where {N} = IndexSet{N}(NTuple{N,Index}(inds)) @@ -476,11 +477,11 @@ swaptags(is, vargs...) = swaptags!(copy(is), vargs...) 
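+# The contraction helpers in the section below use an integer label
+# convention (illustrative sketch): for A with indices (i,j) and B with
+# indices (j,k), the shared index j gets a matching negative label in both
+# tensors while i and k get distinct positive labels, so contract_inds can
+# collect the uncontracted indices (i,k) for the result.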
# Helper functions for contracting ITensors # -function compute_contraction_labels(Ai,Bi) +function compute_contraction_labels(Ai::IndexSet{N1},Bi::IndexSet{N2}) where {N1,N2} rA = length(Ai) rB = length(Bi) - Aind = zeros(Int,rA) - Bind = zeros(Int,rB) + Aind = MVector{N1}(ntuple(_->0,Val(N1))) + Bind = MVector{N2}(ntuple(_->0,Val(N2))) ncont = 0 for i = 1:rA, j = 1:rB @@ -498,20 +499,20 @@ function compute_contraction_labels(Ai,Bi) if(Bind[j]==0) Bind[j] = (u+=1) end end - return (Aind,Bind) + return (NTuple{N1,Int}(Aind),NTuple{N2,Int}(Bind)) end -function contract_inds(Ais::IndexSet, - Aind, - Bis::IndexSet, - Bind) +function contract_inds(Ais::IndexSet{N1}, + Aind::NTuple{N1,Int}, + Bis::IndexSet{N2}, + Bind::NTuple{N2,Int}) where {N1,N2} ncont = 0 for i in Aind if(i < 0) ncont += 1 end end - nuniq = length(Ais)+length(Bis)-2*ncont - Cind = zeros(Int,nuniq) - Cis = fill(Index(),nuniq) + NR = N1+N2-2*ncont + Cind = MVector{NR,Int}(ntuple(_->0,Val(NR))) #zeros(Int,NR) + Cis = IndexSet{NR}() #fill(Index(),NR) u = 1 for i ∈ 1:length(Ais) if(Aind[i] > 0) @@ -527,7 +528,7 @@ function contract_inds(Ais::IndexSet, u += 1 end end - return (IndexSet(Cis...),Cind) + return (Cis,NTuple{NR,Int}(Cind)) end # TODO: implement this in terms of a tuple, @@ -568,11 +569,7 @@ getperm(col1,col2) Get the permutation that takes collection 2 to collection 1, such that col2[p].==col1 """ -function getperm(s1,s2) - return _getperm(IndexSet(s1),IndexSet(s2)) -end - -function _getperm(s1::IndexSet{N},s2::IndexSet{N}) where {N} +function getperm(s1::Union{IndexSet{N},NTuple{N}}, s2::Union{IndexSet{N},NTuple{N}}) where {N} return ntuple(i->findfirst(==(s1[i]),s2),Val(N)) end diff --git a/src/itensor.jl b/src/itensor.jl index 04552c73fb..eea55cc907 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -9,6 +9,7 @@ export ITensor, isNull, scale!, normalize!, + randn!, multSiteOps, order, permute, @@ -18,7 +19,6 @@ export ITensor, store, dense - mutable struct ITensor{N} store::TensorStorage inds::IndexSet{N} @@ -487,7 +487,6 @@ function add!(R::ITensor{N},α::Number,T::ITensor{N}) where {N} perm = getperm(inds(R),inds(T)) TR,TT = Tensor(R),Tensor(T) - # TODO: make this into a permutedims!?(Tensor,Tensor,perm,f) function? 
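+  # Sketch: for R .+= α*T with, e.g., inds(R) == (i,j) and inds(T) == (j,i),
+  # perm == (2,1) and permutedims!! below combines elementwise with
+  # (r,t)->r+α*t while permuting T into the index order of R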
# Include type promotion from α TR = convert(promote_type(typeof(TR),typeof(TT)),TR) TR = permutedims!!(TR,TT,perm,(r,t)->r+α*t) diff --git a/src/tensor/contraction_logic.jl b/src/tensor/contraction_logic.jl index 130dce0429..013e76a274 100644 --- a/src/tensor/contraction_logic.jl +++ b/src/tensor/contraction_logic.jl @@ -1,25 +1,8 @@ -## Check if the contraction is an outer product -## (none of the labels are negative) -#function is_outer(l1::Vector{Int},l2::Vector{Int}) -# for l1i in l1 -# if l1i < 0 -# return false -# end -# end -# # I think this one is not necessary -# for l2i in l2 -# if l2i < 0 -# return false -# end -# end -# return true -#end - -mutable struct ContractionProperties - ai::Vector{Int} - bi::Vector{Int} - ci::Vector{Int} +mutable struct ContractionProperties{Na,Nb,Nc} + ai::NTuple{Na,Int} + bi::NTuple{Nb,Int} + ci::NTuple{Nc,Int} nactiveA::Int nactiveB::Int nactiveC::Int @@ -44,16 +27,14 @@ mutable struct ContractionProperties newArange::Vector{Int} newBrange::Vector{Int} newCrange::Vector{Int} - function ContractionProperties(ai::Vector{Int}, - bi::Vector{Int}, - ci::Vector{Int}) - new(ai,bi,ci,0,0,0,Vector{Int}(),Vector{Int}(),Vector{Int}(),false,false,false,1,1,1,0, + function ContractionProperties(ai::NTuple{Na,Int},bi::NTuple{Nb,Int},ci::NTuple{Nc,Int}) where {Na,Nb,Nc} + new{Na,Nb,Nc}(ai,bi,ci,0,0,0,Vector{Int}(),Vector{Int}(),Vector{Int}(),false,false,false,1,1,1,0, length(ai),length(bi),length(ai),length(bi),Vector{Int}(),Vector{Int}(),Vector{Int}(), false,Vector{Int}(),Vector{Int}(),Vector{Int}()) end end -function compute_perms!(props::ContractionProperties)::Nothing +function compute_perms!(props::ContractionProperties) #Use !AtoB.empty() as a check to see if we've already run this length(props.AtoB)!=0 && return @@ -101,13 +82,6 @@ function compute_perms!(props::ContractionProperties)::Nothing end -#function is_trivial_permutation(P::Vector{Int})::Bool -# for n = 1:length(P) -# P[n]!=n && return false -# end -# return true -#end - function checkACsameord(props::ContractionProperties)::Bool props.Austart>=length(props.ai) && return true aCind = props.AtoC[props.Austart] @@ -138,25 +112,6 @@ Atrans(props::ContractionProperties) = contractedA(props,1) Btrans(props::ContractionProperties) = !contractedB(props,1) Ctrans(props::ContractionProperties) = props.ctrans -# TODO: replace find_index(v,t) with built in Julia function: -# findfirst(==(t), v) -#function find_index(v::Vector{Int},t)::Int -# for i = 1:length(v) -# v[i]==t && return i -# end -# return -1 -#end - -#function permute_extents(R::Vector{Int},P::Vector{Int})::Vector{Int} -# Rb = fill(0,length(R)) -# n = 1 -# for pn in P -# Rb[pn] = R[n] -# n += 1 -# end -# return Rb -#end - function compute_contraction_properties!(props::ContractionProperties, A, B, C) compute_perms!(props) @@ -287,7 +242,7 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) for k = 1:rc #j = find_index(props.ai,props.ci[k]) j = findfirst(==(props.ci[k]),props.ai) - if j!=-1 + if j!==nothing props.AtoC[newi] = k props.PA[newi+1] = j newi += 1 @@ -349,7 +304,7 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) for k = 1:rc #j = find_index(props.bi,props.ci[k]) j = findfirst(==(props.ci[k]),props.bi) - if j!=-1 + if j!==nothing props.BtoC[newi] = k props.PB[newi+1] = j newi += 1 diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index be8f3bc52e..a0830aecf5 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -1,4 +1,6 @@ -export Dense +export Dense, + 
outer, + ⊗ # # Dense storage @@ -21,6 +23,7 @@ Base.similar(D::Dense{T},dims) where {T} = Dense{T}(similar(data(D),dim(dims))) Base.similar(::Type{Dense{T}},dims) where {T} = Dense{T}(similar(Vector{T},dim(dims))) Base.similar(D::Dense,::Type{T}) where {T} = Dense{T}(similar(data(D),T)) Base.copy(D::Dense{T}) where {T} = Dense{T}(copy(data(D))) +Base.copyto!(D1::Dense,D2::Dense) = copyto!(data(D1),data(D2)) # convert to complex # TODO: this could be a generic TensorStorage function @@ -39,7 +42,7 @@ Base.convert(::Type{Dense{R}},D::Dense) where {R} = Dense{R}(convert(Vector{R},d # DenseTensor (Tensor using Dense storage) # -const DenseTensor{El,N,Inds} = Tensor{El,N,<:Dense,Inds} +const DenseTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Dense} # Basic functionality for AbstractArray interface Base.IndexStyle(::Type{TensorT}) where {TensorT<:DenseTensor} = IndexLinear() @@ -143,24 +146,37 @@ using Base.Cartesian: @nexprs, return TP end end +function Base.permutedims!(TP::DenseTensor{<:Number,0}, + T::DenseTensor{<:Number,0}, + perm, + f::Function) + TP[] = f(TP[],T[]) + return TP +end -function outer(T1::DenseTensor,T2::DenseTensor) - return Tensor(Dense(vec(Vector(T1)*transpose(Vector(T2)))),union(inds(T1),inds(T2))) +function outer(T1::DenseTensor{ElT1},T2::DenseTensor{ElT2}) where {ElT1,ElT2} + array_outer = vec(Array(T1))*transpose(vec(Array(T2))) + inds_outer = unioninds(inds(T1),inds(T2)) + return Tensor(Dense{promote_type(ElT1,ElT2)}(vec(array_outer)),inds_outer) end const ⊗ = outer # TODO: move to tensor.jl? -function contract(T1::Tensor,labelsT1, - T2::Tensor,labelsT2) +function contract(T1::Tensor{<:Number,N1},labelsT1, + T2::Tensor{<:Number,N2},labelsT2) where {N1,N2} indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) - R = similar(promote_type(typeof(T1),typeof(T2)),indsR) - contract!(R,labelsR,T1,labelsT1,T2,labelsT2) + if N1+N2==length(indsR) + R = T1⊗T2 + else + R = similar(promote_type(typeof(T1),typeof(T2)),indsR) + contract!(R,labelsR,T1,labelsT1,T2,labelsT2) + end return R end -function contract!(R::DenseTensor,labelsR, - T1::DenseTensor{<:Number,N1},labelsT1, - T2::DenseTensor{<:Number,N2},labelsT2) where {N1,N2} +function contract!(R::Tensor{<:Number,NR},labelsR::NTuple{NR,Int}, + T1::Tensor{<:Number,N1},labelsT1::NTuple{N1,Int}, + T2::Tensor{<:Number,N2},labelsT2::NTuple{N2,Int}) where {NR,N1,N2} if N1==0 # TODO: replace with an add! function? # What about doing `R .= T1[] .* PermutedDimsArray(T2,perm)`? 
@@ -169,13 +185,14 @@ function contract!(R::DenseTensor,labelsR, elseif N2==0 perm = getperm(labelsR,labelsT1) permutedims!(R,T1,perm,(r,t1)->T2[]*t1) - elseif isdisjoint(labelsT1,labelsT2) + elseif N1+N2==NR # TODO: permute T1 and T2 appropriately first (can be more efficient # then permuting the result of T1⊗T2) + # TODO: implement the in-place version directly Rp = T1⊗T2 - labelsRp = union(labelsT1,labelsT2) + labelsRp = unioninds(labelsT1,labelsT2) perm = getperm(labelsR,labelsRp) - if !istrivial(perm) + if !is_trivial_permutation(perm) permutedims!(R,Rp,perm) else copyto!(R,Rp) @@ -186,6 +203,8 @@ function contract!(R::DenseTensor,labelsR, return R end +Base.copyto!(R::Tensor,T::Tensor) = copyto!(store(R),store(T)) + # TODO: make sure this is doing type promotion correctly # since we are calling BLAS (need to promote T1 and T2 to # the same types) @@ -198,15 +217,15 @@ function _contract!(R::DenseTensor,labelsR, return R end -function _contract!(C::DenseTensor{T}, - A::DenseTensor{T}, - B::DenseTensor{T}, +function _contract!(C::DenseTensor{T,NC}, + A::DenseTensor{T,NA}, + B::DenseTensor{T,NB}, props::ContractionProperties, α::T=one(T), - β::T=zero(T)) where {T} + β::T=zero(T)) where {T,NC,NA,NB} tA = 'N' if props.permuteA - aref = reshape(permutedims(A,props.PA),props.dmid,props.dleft) + aref = reshape(permutedims(A,NTuple{NA,Int}(props.PA)),props.dmid,props.dleft) tA = 'T' else #A doesn't have to be permuted @@ -220,7 +239,7 @@ function _contract!(C::DenseTensor{T}, tB = 'N' if props.permuteB - bref = reshape(permutedims(B,props.PB),props.dmid,props.dright) + bref = reshape(permutedims(B,NTuple{NB,Int}(props.PB)),props.dmid,props.dright) else if Btrans(props) bref = reshape(B,props.dright,props.dmid) @@ -249,10 +268,13 @@ function _contract!(C::DenseTensor{T}, end #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),aref,bref,promote_type(T,Tβ)(β),cref) + + # TODO: make sure this is fast with Tensor{ElT,2}, or + # convert aref and bref to Matrix BLAS.gemm!(tA,tB,α,aref,bref,β,cref) if props.permuteC - permutedims!(C,reshape(cref,props.newCrange...),props.PC) + permutedims!(C,reshape(cref,props.newCrange...),NTuple{NC,Int}(props.PC)) end return C end diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl index 54e5832d3b..1a9b7f4888 100644 --- a/src/tensor/diag.jl +++ b/src/tensor/diag.jl @@ -36,10 +36,13 @@ Base.eltype(::Diag{T}) where {T} = eltype(T) Base.eltype(::Type{Diag{T}}) where {T} = eltype(T) # Deal with uniform Diag conversion -Base.convert(::Type{StorageT},D::Diag) where {StorageT<:Diag{T}} where {T} = Diag{T}(data(D)) +Base.convert(::Type{<:Diag{T}},D::Diag) where {T} = Diag{T}(data(D)) Base.similar(D::Diag{T}) where {T} = Diag{T}(similar(data(D))) +Base.similar(D::Diag{T},inds) where {T} = Diag{T}(similar(data(D),minimum(dims(inds)))) +Base.similar(D::Type{<:NonuniformDiag{T}},inds) where {T} = Diag{T}(similar(T,length(inds)==0 ? 
1 : minimum(dims(inds)))) Base.similar(D::UniformDiag{T}) where {T<:Number} = Diag{T}(zero(T)) +Base.similar(::Type{<:UniformDiag{T}},inds) where {T<:Number} = Diag{T}(zero(T)) #*(D::Diag{T},x::S) where {T<:AbstractVector,S<:Number} = Dense{promote_type(eltype(D),S)}(x*data(D)) #*(x::Number,D::Diag) = D*x @@ -60,21 +63,35 @@ Base.promote_rule(::Type{UniformDiag{T1}},::Type{NonuniformDiag{T2}}) where Base.promote_rule(::Type{Dense{T1}},::Type{Diag{T2}}) where {T1,T2} = Dense{promote_type(T1,eltype(T2))} -const DiagTensor{El,N,Inds} = Tensor{El,N,<:Diag,Inds} -const NonuniformDiagTensor{El,N,Inds} = Tensor{El,N,<:NonuniformDiag,Inds} -const UniformDiagTensor{El,N,Inds} = Tensor{El,N,<:UniformDiag,Inds} +const DiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Diag} +const NonuniformDiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where + {StoreT<:NonuniformDiag} +const UniformDiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where + {StoreT<:UniformDiag} Base.IndexStyle(::Type{TensorT}) where {TensorT<:DiagTensor} = IndexCartesian() # TODO: this needs to be better (promote element type, check order compatibility, # etc. -function Base.convert(::Type{TensorT}, T::DiagTensor{ElT,N}) where - {TensorT<:Tensor{ElT,N,<:Dense}} where {ElT,N} +function Base.convert(::Type{<:Tensor{ElT,N,<:Dense}}, T::DiagTensor{ElT,N}) where {ElT,N} return dense(T) end +function Base.Array(T::DiagTensor{ElT,N}) where {ElT,N} + A = zeros(ElT,dims(T)) + for i = 1:diag_length(T) + diag_inds = CartesianIndex{N}(ntuple(_->i,Val(N))) + A[diag_inds] = T[diag_inds] + end + return A +end +Base.Matrix(T::DiagTensor{<:Number,2}) = Array(T) +Base.Vector(T::DiagTensor{<:Number,1}) = Array(T) + diag_length(T::DiagTensor) = minimum(dims(T)) +diag_length(T::DiagTensor{<:Number,0}) = 1 +# Add a get_diagindex(T::DiagTensor,ind::Int) function to avoid checking the input function Base.getindex(T::DiagTensor{ElT,N},inds::Vararg{Int,N}) where {ElT,N} if all(==(inds[1]),inds) return store(T)[inds[1]] @@ -82,6 +99,8 @@ function Base.getindex(T::DiagTensor{ElT,N},inds::Vararg{Int,N}) where {ElT,N} return zero(eltype(ElT)) end end +Base.getindex(T::DiagTensor{<:Number,1},ind::Int) = store(T)[ind] +Base.getindex(T::DiagTensor{<:Number,0}) = store(T)[1] # Set diagonal elements # Throw error for off-diagonal @@ -89,6 +108,8 @@ function Base.setindex!(T::DiagTensor{<:Number,N},val,inds::Vararg{Int,N}) where all(==(inds[1]),inds) || error("Cannot set off-diagonal element of Diag storage") return store(T)[inds[1]] = val end +Base.setindex!(T::DiagTensor{<:Number,1},val,ind::Int) = ( store(T)[ind] = val ) +Base.setindex!(T::DiagTensor{<:Number,0},val) = ( store(T)[1] = val ) function Base.setindex!(T::UniformDiagTensor{<:Number,N},val,inds::Vararg{Int,N}) where {N} error("Cannot set elements of a uniform Diag storage") @@ -99,22 +120,33 @@ function dense(D::DiagTensor) return Tensor(Dense{eltype(D)}(vec(Array(D))),inds(D)) end -# TODO: implement this in a sparse way -# For now, we will just make them dense since the output is dense anyway -function storage_outer(D1::Diag{T1},is1::IndexSet,D2::Diag{T2},is2::IndexSet) where {T1,T2} - A1 = storage_dense(D1,is1) - A2 = storage_dense(D2,is2) - return storage_outer(A1,is1,A2,is2) +function outer(T1::DiagTensor{ElT1,N1},T2::DiagTensor{ElT2,N2}) where {ElT1,ElT2,N1,N2} + indsR = unioninds(inds(T1),inds(T2)) + R = Tensor(Dense{promote_type(ElT1,ElT2)}(zeros(dim(indsR))),indsR) + for i1 = 1:diag_length(T1), i2 = 1:diag_length(T2) + diag_inds1 = 
CartesianIndex{N1}(ntuple(_->i1,Val(N1))) + diag_inds2 = CartesianIndex{N2}(ntuple(_->i2,Val(N2))) + indsR = CartesianIndex{N1+N2}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(N1+N2))) + R[indsR] = T1[diag_inds1]*T2[diag_inds2] + end + return R end +# For now, we will just make them dense since the output is dense anyway +#function storage_outer(D1::Diag{T1},is1::IndexSet,D2::Diag{T2},is2::IndexSet) where {T1,T2} +# A1 = storage_dense(D1,is1) +# A2 = storage_dense(D2,is2) +# return storage_outer(A1,is1,A2,is2) +#end + # Convert to an array by setting the diagonal elements -function storage_convert(::Type{Array},D::Diag,is::IndexSet) - A = zeros(eltype(D),dims(is)) - for i = 1:minDim(is) - A[fill(i,length(is))...] = D[i] - end - return A -end +#function storage_convert(::Type{Array},D::Diag,is::IndexSet) +# A = zeros(eltype(D),dims(is)) +# for i = 1:minDim(is) +# A[fill(i,length(is))...] = D[i] +# end +# return A +#end #function storage_convert(::Type{Dense{T}},D::Diag,is::IndexSet) where T # return Dense{T}(vec(storage_convert(Array,D,is))) @@ -325,6 +357,34 @@ function permutedims!!(R::DenseTensor{ElR,N}, return R end +function _contract!(R::DiagTensor{ElR,NR},labelsR, + T1::DiagTensor{<:Number,N1},labelsT1, + T2::DiagTensor{<:Number,N2},labelsT2) where {ElR,NR,N1,N2} + if NR==0 # If all indices of A and B are contracted + # all indices are summed over, just add the product of the diagonal + # elements of A and B + # Need to set to zero since + # Cdata is originally uninitialized memory + # (so causes random output) + + R[1] = zero(ElR) + for i = 1:diag_length(T1) + diag_inds = CartesianIndex{N1}(ntuple(_->i,Val(N1))) + R[1] += T1[diag_inds]*T2[diag_inds] + end + else + min_dim = min(diag_length(T1),diag_length(T2)) + # not all indices are summed over, set the diagonals of the result + # to the product of the diagonals of A and B + for i = 1:min_dim + diag_inds_R = CartesianIndex{NR}(ntuple(_->i,Val(NR))) + diag_inds_T1 = CartesianIndex{N1}(ntuple(_->i,Val(N1))) + diag_inds_T2 = CartesianIndex{N2}(ntuple(_->i,Val(N2))) + R[diag_inds_R] = T1[diag_inds_T1]*T2[diag_inds_T2] + end + end +end + # TODO: make this a special version of storage_add!() # This shouldn't do anything to the data #function storage_permute!(Bstore::Diag, @@ -721,27 +781,27 @@ end # Maybe this works for uniform storage as well -function _contract_diag_diag!(Cdata::Vector{T},Clabels::Vector{Int}, - Adata::Vector{T},Alabels::Vector{Int}, - Bdata::Vector{T},Blabels::Vector{Int}) where T - if length(Clabels) == 0 # If all indices of A and B are contracted - # all indices are summed over, just add the product of the diagonal - # elements of A and B - Adim = length(Adata) # == length(Bdata) - # Need to set to zero since - # Cdata is originally uninitialized memory - # (so causes random output) - Cdata[1] = zero(T) - for i = 1:Adim - Cdata[1] += Adata[i]*Bdata[i] - end - else - min_dim = min(length(Adata),length(Bdata)) - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - for i = 1:min_dim - Cdata[i] = Adata[i]*Bdata[i] - end - end -end +#function _contract_diag_diag!(Cdata::Vector{T},Clabels::Vector{Int}, +# Adata::Vector{T},Alabels::Vector{Int}, +# Bdata::Vector{T},Blabels::Vector{Int}) where T +# if length(Clabels) == 0 # If all indices of A and B are contracted +# # all indices are summed over, just add the product of the diagonal +# # elements of A and B +# Adim = length(Adata) # == length(Bdata) +# # Need to set to zero since +# # Cdata is originally uninitialized 
memory +# # (so causes random output) +# Cdata[1] = zero(T) +# for i = 1:Adim +# Cdata[1] += Adata[i]*Bdata[i] +# end +# else +# min_dim = min(length(Adata),length(Bdata)) +# # not all indices are summed over, set the diagonals of the result +# # to the product of the diagonals of A and B +# for i = 1:min_dim +# Cdata[i] = Adata[i]*Bdata[i] +# end +# end +#end diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index e50b9e905c..db212ab7a6 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -35,7 +35,7 @@ Base.strides(is::Dims) = Base.size_to_strides(1, dims(is)...) # as Dims{4}) StaticArrays.similar_type(::Type{IndsT},::Val{N}) where {IndsT<:Dims,N} = Dims{N} -unioninds(is1::Dims{N1},is2::Dims{N2}) where {N1,N2} = Dims{N1+N2}(is1...,is2...) +unioninds(is1::Dims{N1},is2::Dims{N2}) where {N1,N2} = Dims{N1+N2}((is1...,is2...)) # The size is obtained from the indices dims(T::Tensor) = dims(inds(T)) diff --git a/test/test_itensor_diag.jl b/test/test_itensor_diag.jl index 1828b7ef5b..bca21b6f67 100644 --- a/test/test_itensor_diag.jl +++ b/test/test_itensor_diag.jl @@ -215,7 +215,7 @@ using ITensors, @test D2*D1 ≈ dense(D1)*dense(D2) end - @testset "Contraction Diag*Diag (all contracted)" begin + @testset "Contraction Diag*Diag (general)" begin D1 = diagITensor(v,l,i,k,j) D2 = diagITensor(vr,m,k,n,l) From 44ca757239190d760d3da0770633e606064a1981 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Fri, 13 Sep 2019 12:09:48 -0400 Subject: [PATCH 11/27] Fix some type promotions of Diag storage --- src/tensor/dense.jl | 15 +++++++++++++++ src/tensor/diag.jl | 33 ++++++++++++++++++++++----------- src/tensor/tensor.jl | 1 + test/test_itensor_dense.jl | 2 +- 4 files changed, 39 insertions(+), 12 deletions(-) diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index a0830aecf5..0777746258 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -63,6 +63,7 @@ function Base.convert(::Type{TensorR}, end # Reshape a DenseTensor using the specified dimensions +# This returns a view into the same Tensor data function Base.reshape(T::DenseTensor,dims) dim(T)==dim(dims) || error("Total new dimension must be the same as the old dimension") return Tensor(store(T),dims) @@ -72,6 +73,9 @@ function Base.reshape(T::DenseTensor,dims::Dims) dim(T)==dim(dims) || error("Total new dimension must be the same as the old dimension") return Tensor(store(T),dims) end +function Base.reshape(T::DenseTensor,dims::Int...) + return Tensor(store(T),tuple(dims...)) +end # Create an Array that is a view of the Dense Tensor # Useful for using Base Array functions @@ -154,6 +158,17 @@ function Base.permutedims!(TP::DenseTensor{<:Number,0}, return TP end +function outer!(R::DenseTensor,T1::DenseTensor,T2::DenseTensor) + # TODO: make vec(::Tensor) work? Would this be a view? What + # happens to sparse tensors like Diag? 
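+  # Sketch of the intent (assuming vec of a DenseTensor is a vector view of
+  # its data): with v1 = vec(T1) and v2 = vec(T2), the outer product
+  # R = v1*transpose(v2) is formed as a BLAS rank-1 update (ger!) into the
+  # dim(v1) x dim(v2) matrix view of R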
+  v1 = vec(T1)
+  v2 = vec(T2)
+  R .= zero(eltype(R))
+  RM = reshape(R,dim(v1),dim(v2))
+  # The rank-1 update goes into the matrix view RM, not R itself
+  BLAS.ger!(one(eltype(R)),v1,v2,RM)
+  return R
+end
+
 function outer(T1::DenseTensor{ElT1},T2::DenseTensor{ElT2}) where {ElT1,ElT2}
   array_outer = vec(Array(T1))*transpose(vec(Array(T2)))
   inds_outer = unioninds(inds(T1),inds(T2))
diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl
index 1a9b7f4888..1e2de465a4 100644
--- a/src/tensor/diag.jl
+++ b/src/tensor/diag.jl
@@ -33,7 +33,7 @@ Base.complex(D::Diag{T}) where {T} = Diag{complex(T)}(complex(data(D)))
 Base.copy(D::Diag{T}) where {T} = Diag{T}(copy(data(D)))

 Base.eltype(::Diag{T}) where {T} = eltype(T)
-Base.eltype(::Type{Diag{T}}) where {T} = eltype(T)
+Base.eltype(::Type{<:Diag{T}}) where {T} = eltype(T)

 # Deal with uniform Diag conversion
 Base.convert(::Type{<:Diag{T}},D::Diag) where {T} = Diag{T}(data(D))
@@ -52,15 +52,18 @@ Base.similar(::Type{<:UniformDiag{T}},inds) where {T<:Number} = Diag{T}(zero(T))
 # Useful for knowing how conversions should work when adding and contracting
 #

-Base.promote_rule(::Type{Diag{T1}},::Type{Diag{T2}}) where
-  {T1,T2} = Diag{promote_type(T1,T2)}
+Base.promote_rule(::Type{<:UniformDiag{T1}},::Type{<:UniformDiag{T2}}) where
+  {T1<:Number,T2<:Number} = Diag{promote_type(T1,T2)}
+
+Base.promote_rule(::Type{<:NonuniformDiag{T1}},::Type{<:NonuniformDiag{T2}}) where
+  {T1<:AbstractVector,T2<:AbstractVector} = Diag{promote_type(T1,T2)}

 # TODO: how do we make this work more generally for T2<:AbstractVector{S2}?
 # Make a similar_type(AbstractVector{S2},T1) -> AbstractVector{T1} function?
-Base.promote_rule(::Type{UniformDiag{T1}},::Type{NonuniformDiag{T2}}) where
-  {T1,T2<:Vector{S2}} where S2 = Diag{Vector{promote_type(T1,S2)}}
+Base.promote_rule(::Type{<:UniformDiag{T1}},::Type{<:NonuniformDiag{T2}}) where
+  {T1<:Number,T2<:Vector{S2}} where S2 = Diag{Vector{promote_type(T1,S2)}}

-Base.promote_rule(::Type{Dense{T1}},::Type{Diag{T2}}) where
+Base.promote_rule(::Type{<:Dense{T1}},::Type{<:Diag{T2}}) where
   {T1,T2} = Dense{promote_type(T1,eltype(T2))}

 const DiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Diag}
@@ -69,7 +72,7 @@ const NonuniformDiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where
 const UniformDiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where
                                                 {StoreT<:UniformDiag}

-Base.IndexStyle(::Type{TensorT}) where {TensorT<:DiagTensor} = IndexCartesian()
+Base.IndexStyle(::Type{<:DiagTensor}) = IndexCartesian()

 # TODO: this needs to be better (promote element type, check order compatibility,
 # etc.
-function Base.convert(::Type{TensorT}, T::DiagTensor{ElT,N}) where
-  {TensorT<:Tensor{ElT,N,<:Dense}} where {ElT,N}
+function Base.convert(::Type{<:Tensor{ElT,N,<:Dense}}, T::DiagTensor{ElT,N}) where {ElT,N}
   return dense(T)
 end
@@ -120,18 +123,26 @@ function dense(D::DiagTensor)
   return Tensor(Dense{eltype(D)}(vec(Array(D))),inds(D))
 end

-function outer(T1::DiagTensor{ElT1,N1},T2::DiagTensor{ElT2,N2}) where {ElT1,ElT2,N1,N2}
-  indsR = unioninds(inds(T1),inds(T2))
-  R = Tensor(Dense{promote_type(ElT1,ElT2)}(zeros(dim(indsR))),indsR)
+function outer!(R::DenseTensor{<:Number,NR},
+                T1::DiagTensor{<:Number,N1},
+                T2::DiagTensor{<:Number,N2}) where {NR,N1,N2}
   for i1 = 1:diag_length(T1), i2 = 1:diag_length(T2)
     diag_inds1 = CartesianIndex{N1}(ntuple(_->i1,Val(N1)))
     diag_inds2 = CartesianIndex{N2}(ntuple(_->i2,Val(N2)))
-    indsR = CartesianIndex{N1+N2}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(N1+N2)))
+    indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(NR)))
     R[indsR] = T1[diag_inds1]*T2[diag_inds2]
   end
   return R
 end

+# Write an in-place version
+function outer(T1::DiagTensor{ElT1,N1},T2::DiagTensor{ElT2,N2}) where {ElT1,ElT2,N1,N2}
+  indsR = unioninds(inds(T1),inds(T2))
+  R = Tensor(Dense{promote_type(ElT1,ElT2)}(zeros(dim(indsR))),indsR)
+  outer!(R,T1,T2)
+  return R
+end

 # For now, we will just make them dense since the output is dense anyway
 #function storage_outer(D1::Diag{T1},is1::IndexSet,D2::Diag{T2},is2::IndexSet) where {T1,T2}
 #  A1 = storage_dense(D1,is1)
 #  A2 = storage_dense(D2,is2)
 #  return storage_outer(A1,is1,A2,is2)
 #end
diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl
index db212ab7a6..654c7d9c8a 100644
--- a/src/tensor/tensor.jl
+++ b/src/tensor/tensor.jl
@@ -28,6 +28,7 @@ dim(ds::Dims) = prod(ds)
 # This may be a bad idea to overload?
 # Type piracy?
 Base.strides(is::Dims) = Base.size_to_strides(1, dims(is)...)
+Base.copy(ds::Dims) = ds

 # This is to help with some generic programming in the Tensor
 # code (it helps to construct a Tuple(::NTuple{N,Int}) where the
diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl
index 6fdb644478..19ceafe2e2 100644
--- a/test/test_itensor_dense.jl
+++ b/test/test_itensor_dense.jl
@@ -489,7 +489,7 @@
   T = T + swapprime(dag(T),0,1)
   U,D,u = eigenHermitian(T)
   @test A≈U*D*prime(dag(U))
-  UUᵀ = U*prime(dag(U),u)
+  UUᴴ = U*prime(dag(U),u)
   @test UUᴴ ≈ δ(u,u') atol=1e-14
 end

From 9e57bed6604f04e8aea2fb8b4b53e9ee3d4b0e3c Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Fri, 13 Sep 2019 16:58:26 -0400
Subject: [PATCH 12/27] Added Dense*Diag contraction (most Dense and Diag tests passing)

---
 src/indexset.jl            |   9 +-
 src/tensor/dense.jl        |  39 ++-
 src/tensor/diag.jl         | 551 +++++++++++++++++++++++--------------
 src/tensor/tensor.jl       |  23 +-
 test/test_itensor_dense.jl |   3 +-
 5 files changed, 406 insertions(+), 219 deletions(-)

diff --git a/src/indexset.jl b/src/indexset.jl
index 5fdfb1f2bb..f7ea219d95 100644
--- a/src/indexset.jl
+++ b/src/indexset.jl
@@ -55,6 +55,12 @@ IndexSet(inds::IndexSet,i::Index) = IndexSet(inds...,i)
 IndexSet(i::Index,inds::IndexSet) = IndexSet(i,inds...)
 IndexSet(is1::IndexSet,is2::IndexSet) = IndexSet(is1...,is2...)

+# This is used in type promotion in the Tensor contraction code
+Base.promote_rule(::Type{<:IndexSet},::Type{Val{N}}) where {N} = IndexSet{N}
+
+ValLength(::Type{IndexSet{N}}) where {N} = Val{N}
+ValLength(::IndexSet{N}) where {N} = Val(N)
+
 # TODO: make a version that accepts an arbitrary set of IndexSets
 # as well as mixtures of separate Indices and Tuples of Indices.
 # Look at jointuples in the DenseTensor decomposition logic.
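+# e.g. ValLength(IndexSet{3}) == Val{3} and ValLength(IndexSet(i,j)) == Val(2):
+# this lets the contraction code dispatch on the number of result indices at
+# compile time (see contraction_output_type for Diag storage)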
@@ -72,7 +78,8 @@ end Base.getindex(is::IndexSet,n::Integer) = getindex(is.inds,n) Base.setindex!(is::IndexSet,i::Index,n::Integer) = setindex!(is.inds,i,n) -Base.length(is::IndexSet) = length(is.inds) +Base.length(is::IndexSet{N}) where {N} = N +Base.length(::Type{IndexSet{N}}) where {N} = N order(is::IndexSet) = length(is) Base.copy(is::IndexSet) = IndexSet(copy(is.inds)) dims(is::IndexSet{N}) where {N} = ntuple(i->dim(is[i]),Val(N)) diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 0777746258..7635659179 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -19,12 +19,19 @@ Base.getindex(D::Dense,i::Integer) = data(D)[i] Base.setindex!(D::Dense,v,i::Integer) = (data(D)[i] = v) Base.similar(D::Dense{T}) where {T} = Dense{T}(similar(data(D))) + +# TODO: make this just take Int, the length of the data Base.similar(D::Dense{T},dims) where {T} = Dense{T}(similar(data(D),dim(dims))) + +# TODO: make this just take Int, the length of the data Base.similar(::Type{Dense{T}},dims) where {T} = Dense{T}(similar(Vector{T},dim(dims))) + Base.similar(D::Dense,::Type{T}) where {T} = Dense{T}(similar(data(D),T)) Base.copy(D::Dense{T}) where {T} = Dense{T}(copy(data(D))) Base.copyto!(D1::Dense,D2::Dense) = copyto!(data(D1),data(D2)) +Base.zeros(::Type{Dense{T}},dim::Int) where {T} = Dense{T}(zeros(T,dim)) + # convert to complex # TODO: this could be a generic TensorStorage function Base.complex(D::Dense{T}) where {T} = Dense{complex(T)}(complex(data(D))) @@ -176,16 +183,30 @@ function outer(T1::DenseTensor{ElT1},T2::DenseTensor{ElT2}) where {ElT1,ElT2} end const ⊗ = outer +function contraction_output_type(TensorT1::Type{<:DenseTensor}, + TensorT2::Type{<:DenseTensor}, + indsR) + return similar_type(promote_type(TensorT1,TensorT2),indsR) +end + +function contraction_output(TensorT1::Type{<:DenseTensor}, + TensorT2::Type{<:DenseTensor}, + indsR) + return similar(contraction_output_type(TensorT1,TensorT2,indsR),indsR) +end + # TODO: move to tensor.jl? 
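+# Sketch of the generic driver below: contract_inds builds the output indices
+# and labels, contraction_output picks the result tensor type from the two
+# storage types (e.g. Dense*Dense -> Dense, Diag*Dense -> Dense, Diag*Diag ->
+# Diag, or Dense when the contraction is an outer product), and contract!
+# fills in the elements.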
function contract(T1::Tensor{<:Number,N1},labelsT1, T2::Tensor{<:Number,N2},labelsT2) where {N1,N2} indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) - if N1+N2==length(indsR) - R = T1⊗T2 - else - R = similar(promote_type(typeof(T1),typeof(T2)),indsR) - contract!(R,labelsR,T1,labelsT1,T2,labelsT2) - end + #if N1+N2==length(indsR) + # R = T1⊗T2 + #else + R = contraction_output(typeof(T1),typeof(T2),indsR) + #R = similar(contraction_output_type(typeof(T1),typeof(T2),Val{length(indsR)}),indsR) + #R = similar(promote_type(typeof(T1),typeof(T2)),indsR) + contract!(R,labelsR,T1,labelsT1,T2,labelsT2) + #end return R end @@ -204,13 +225,11 @@ function contract!(R::Tensor{<:Number,NR},labelsR::NTuple{NR,Int}, # TODO: permute T1 and T2 appropriately first (can be more efficient # then permuting the result of T1⊗T2) # TODO: implement the in-place version directly - Rp = T1⊗T2 + outer!(R,T1,T2) labelsRp = unioninds(labelsT1,labelsT2) perm = getperm(labelsR,labelsRp) if !is_trivial_permutation(perm) - permutedims!(R,Rp,perm) - else - copyto!(R,Rp) + permutedims!(R,copy(R),perm) end else _contract!(R,labelsR,T1,labelsT1,T2,labelsT2) diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl index 1e2de465a4..2faf6bd213 100644 --- a/src/tensor/diag.jl +++ b/src/tensor/diag.jl @@ -39,11 +39,18 @@ Base.eltype(::Type{<:Diag{T}}) where {T} = eltype(T) Base.convert(::Type{<:Diag{T}},D::Diag) where {T} = Diag{T}(data(D)) Base.similar(D::Diag{T}) where {T} = Diag{T}(similar(data(D))) + +# TODO: write in terms of ::Int, not inds Base.similar(D::Diag{T},inds) where {T} = Diag{T}(similar(data(D),minimum(dims(inds)))) Base.similar(D::Type{<:NonuniformDiag{T}},inds) where {T} = Diag{T}(similar(T,length(inds)==0 ? 1 : minimum(dims(inds)))) + Base.similar(D::UniformDiag{T}) where {T<:Number} = Diag{T}(zero(T)) Base.similar(::Type{<:UniformDiag{T}},inds) where {T<:Number} = Diag{T}(zero(T)) +# TODO: make this work for other storage besides Vector +Base.zeros(::Type{Diag{T}},dim::Int64) where {T<:AbstractVector} = Diag{T}(zeros(eltype(T),dim)) +Base.zeros(::Type{Diag{T}},dim::Int64) where {T<:Number} = Diag{T}(zero(T)) + #*(D::Diag{T},x::S) where {T<:AbstractVector,S<:Number} = Dense{promote_type(eltype(D),S)}(x*data(D)) #*(x::Number,D::Diag) = D*x @@ -66,6 +73,9 @@ Base.promote_rule(::Type{<:UniformDiag{T1}},::Type{<:NonuniformDiag{T2}}) where Base.promote_rule(::Type{<:Dense{T1}},::Type{<:Diag{T2}}) where {T1,T2} = Dense{promote_type(T1,eltype(T2))} +# Convert a Diag storage type to the closest Dense storage type +dense(::Type{<:Diag{T}}) where {T} = Dense{eltype(T)} + const DiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Diag} const NonuniformDiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:NonuniformDiag} @@ -80,6 +90,49 @@ function Base.convert(::Type{<:Tensor{ElT,N,<:Dense}}, T::DiagTensor{ElT,N}) whe return dense(T) end + +# These are rules for determining the output of a pairwise contraction of Tensors +# (given the indices of the output tensors) +function contraction_output_type(TensorT1::Type{<:DiagTensor}, + TensorT2::Type{<:DenseTensor}, + indsR) + return similar_type(promote_type(TensorT1,TensorT2),indsR) +end +contraction_output_type(TensorT1::Type{<:DenseTensor}, + TensorT2::Type{<:DiagTensor}, + indsR) = contraction_output_type(TensorT2,TensorT1,indsR) + +# This performs the logic that DiagTensor*DiagTensor -> DiagTensor if it is not an outer +# product but -> DenseTensor if it is +# TODO: if the tensors are both order 2 (or less), or if 
there is an Index replacement, +# then they remain diagonal. Should we limit DiagTensor*DiagTensor to cases that +# result in a DiagTensor, for efficiency and type stability? What about a general +# SparseTensor result? +function contraction_output_type(TensorT1::Type{<:DiagTensor{<:Number,N1}}, + TensorT2::Type{<:DiagTensor{<:Number,N2}}, + indsR) where {N1,N2} + if ValLength(indsR)===Val(N1+N2) # Turn into is_outer(inds1,inds2,indsR) function? How + # does type inference work with arithmatic of compile time values? + return similar_type(dense(promote_type(TensorT1,TensorT2)),indsR) + end + return similar_type(promote_type(TensorT1,TensorT2),indsR) +end + +# TODO: move to tensor.jl? +zero_contraction_output(TensorT1::Type{<:Tensor},TensorT2::Type{<:Tensor},indsR) = zeros(contraction_output_type(TensorT1,TensorT2,typeof(indsR)),indsR) + +function contraction_output(TensorT1::Type{<:DiagTensor}, + TensorT2::Type{<:DiagTensor}, + indsR) + return zero_contraction_output(TensorT1,TensorT2,indsR) +end +function contraction_output(TensorT1::Type{<:DiagTensor}, + TensorT2::Type{<:Tensor}, + indsR) + return zero_contraction_output(TensorT1,TensorT2,indsR) +end +contraction_output(TensorT1::Type{<:Tensor},TensorT2::Type{<:DiagTensor},indsR) = contraction_output(TensorT2,TensorT1,indsR) + function Base.Array(T::DiagTensor{ElT,N}) where {ElT,N} A = zeros(ElT,dims(T)) for i = 1:diag_length(T) @@ -123,6 +176,10 @@ function dense(D::DiagTensor) return Tensor(Dense{eltype(D)}(vec(Array(D))),inds(D)) end +function dense(::Type{<:DiagTensor{ElT,N,StoreT,IndsT}}) where {ElT,N,StoreT,IndsT} + return Tensor{ElT,N,dense(StoreT),IndsT} +end + function outer!(R::DenseTensor{<:Number,NR}, T1::DiagTensor{<:Number,N1}, T2::DiagTensor{<:Number,N2}) where {NR,N1,N2} @@ -429,216 +486,222 @@ function storage_scalar(D::Diag{T}) where {T<:Number} end # Contract function for Diag*Dense -function _contract(Cinds::IndexSet, - Clabels::Vector{Int}, - Astore::Diag{Vector{SA}}, - Ainds::IndexSet, - Alabels::Vector{Int}, - Bstore::Dense{SB}, - Binds::IndexSet, - Blabels::Vector{Int}) where {SA<:Number,SB<:Number} - SC = promote_type(SA,SB) - - # Convert the arrays to a common type - # since we will call BLAS - Astore = convert(Diag{Vector{SC}},Astore) - Bstore = convert(Dense{SC},Bstore) - - Adims = dims(Ainds) - Bdims = dims(Binds) - Cdims = dims(Cinds) - - # Create storage for output tensor - # This needs to be filled with zeros, since in general - # the output of Diag*Dense will be sparse - Cstore = Dense{SC}(zero(SC),prod(Cdims)) - - # This seems unnecessary, and makes this function - # non-generic. I think Dense should already store - # the properly-shaped data (and be parametrized by the - # order, and maybe store the size?) 
- Adata = data(Astore) - Bdata = reshape(data(Bstore),Bdims) - Cdata = reshape(data(Cstore),Cdims) - - # Functions to do the contraction - if(length(Alabels)==0) - _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) - elseif(length(Blabels)==0) - _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) - else - _contract_diag_dense!(Cdata,Clabels,Adata,Alabels,Bdata,Blabels) - end - - return Cstore -end +#function _contract(Cinds::IndexSet, +# Clabels::Vector{Int}, +# Astore::Diag{Vector{SA}}, +# Ainds::IndexSet, +# Alabels::Vector{Int}, +# Bstore::Dense{SB}, +# Binds::IndexSet, +# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} +# SC = promote_type(SA,SB) +# +# # Convert the arrays to a common type +# # since we will call BLAS +# Astore = convert(Diag{Vector{SC}},Astore) +# Bstore = convert(Dense{SC},Bstore) +# +# Adims = dims(Ainds) +# Bdims = dims(Binds) +# Cdims = dims(Cinds) +# +# # Create storage for output tensor +# # This needs to be filled with zeros, since in general +# # the output of Diag*Dense will be sparse +# Cstore = Dense{SC}(zero(SC),prod(Cdims)) +# +# # This seems unnecessary, and makes this function +# # non-generic. I think Dense should already store +# # the properly-shaped data (and be parametrized by the +# # order, and maybe store the size?) +# Adata = data(Astore) +# Bdata = reshape(data(Bstore),Bdims) +# Cdata = reshape(data(Cstore),Cdims) +# +# # Functions to do the contraction +# if(length(Alabels)==0) +# _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) +# elseif(length(Blabels)==0) +# _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) +# else +# _contract_diag_dense!(Cdata,Clabels,Adata,Alabels,Bdata,Blabels) +# end +# +# return Cstore +#end # Contract function for Diag uniform * Dense -function _contract(Cinds::IndexSet, - Clabels::Vector{Int}, - Astore::Diag{SA}, - Ainds::IndexSet, - Alabels::Vector{Int}, - Bstore::Dense{SB}, - Binds::IndexSet, - Blabels::Vector{Int}) where {SA<:Number,SB<:Number} - SC = promote_type(SA,SB) - - # Convert the arrays to a common type - # since we will call BLAS - #Astore = convert(Diag{SC},Astore) - #Bstore = convert(Dense{SC},Bstore) - - Bdims = dims(Binds) - Cdims = dims(Cinds) - - # Create storage for output tensor - # This needs to be filled with zeros, since in general - # the output of Diag*Dense will be sparse - Cstore = Dense{SC}(zero(SC),prod(Cdims)) - - # This seems unnecessary, and makes this function - # non-generic. I think Dense should already store - # the properly-shaped data (and be parametrized by the - # order, and maybe store the size?) 
- Adata = data(Astore) - Bdata = reshape(data(Bstore),Bdims) - Cdata = reshape(data(Cstore),Cdims) - - # Functions to do the contraction - if(length(Alabels)==0) - _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) - elseif(length(Blabels)==0) - _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) - elseif(length(Alabels)==2 && length(Clabels)==length(Blabels)) - # This is just a replacement of the indices - # TODO: This logic should be higher up - for i = 1:length(Clabels) - if Blabels[i] > 0 - Cinds[i] = Binds[i] - else - if Alabels[1] > 0 - Cinds[i] = Ainds[1] - else - Cinds[i] = Ainds[2] - end - end - end - Cdata .= Adata .* Bdata - else - _contract_diag_dense!(Cstore,Cinds,Clabels, - Astore,Ainds,Alabels, - Bstore,Binds,Blabels) - end - - return Cstore -end +#function _contract(Cinds::IndexSet, +# Clabels::Vector{Int}, +# Astore::Diag{SA}, +# Ainds::IndexSet, +# Alabels::Vector{Int}, +# Bstore::Dense{SB}, +# Binds::IndexSet, +# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} +# SC = promote_type(SA,SB) +# +# # Convert the arrays to a common type +# # since we will call BLAS +# #Astore = convert(Diag{SC},Astore) +# #Bstore = convert(Dense{SC},Bstore) +# +# Bdims = dims(Binds) +# Cdims = dims(Cinds) +# +# # Create storage for output tensor +# # This needs to be filled with zeros, since in general +# # the output of Diag*Dense will be sparse +# Cstore = Dense{SC}(zero(SC),prod(Cdims)) +# +# # This seems unnecessary, and makes this function +# # non-generic. I think Dense should already store +# # the properly-shaped data (and be parametrized by the +# # order, and maybe store the size?) +# Adata = data(Astore) +# Bdata = reshape(data(Bstore),Bdims) +# Cdata = reshape(data(Cstore),Cdims) +# +# # Functions to do the contraction +# if(length(Alabels)==0) +# _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) +# elseif(length(Blabels)==0) +# _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) +# elseif(length(Alabels)==2 && length(Clabels)==length(Blabels)) +# # This is just a replacement of the indices +# # TODO: This logic should be higher up +# for i = 1:length(Clabels) +# if Blabels[i] > 0 +# Cinds[i] = Binds[i] +# else +# if Alabels[1] > 0 +# Cinds[i] = Ainds[1] +# else +# Cinds[i] = Ainds[2] +# end +# end +# end +# Cdata .= Adata .* Bdata +# else +# _contract_diag_dense!(Cstore,Cinds,Clabels, +# Astore,Ainds,Alabels, +# Bstore,Binds,Blabels) +# end +# +# return Cstore +#end # Contract function for Dense*Diag (calls Diag*Dense) -function _contract(Cinds::IndexSet, - Clabels::Vector{Int}, - Astore::Dense, - Ainds::IndexSet, - Alabels::Vector{Int}, - Bstore::Diag, - Binds::IndexSet, - Blabels::Vector{Int}) - return _contract(Cinds,Clabels, - Bstore,Binds,Blabels, - Astore,Ainds,Alabels) -end +#function _contract(Cinds::IndexSet, +# Clabels::Vector{Int}, +# Astore::Dense, +# Ainds::IndexSet, +# Alabels::Vector{Int}, +# Bstore::Diag, +# Binds::IndexSet, +# Blabels::Vector{Int}) +# return _contract(Cinds,Clabels, +# Bstore,Binds,Blabels, +# Astore,Ainds,Alabels) +#end # Contract function for Diag*Diag -function _contract(Cinds::IndexSet, - Clabels::Vector{Int}, - Astore::Diag{Vector{SA}}, - Ainds::IndexSet, - Alabels::Vector{Int}, - Bstore::Diag{Vector{SB}}, - Binds::IndexSet, - Blabels::Vector{Int}) where {SA<:Number,SB<:Number} - SC = promote_type(SA,SB) - - # Convert the arrays to a common type - # since we will call BLAS - Astore = convert(Diag{Vector{SC}},Astore) - Bstore = convert(Diag{Vector{SC}},Bstore) - Cstore = Diag{Vector{SC}}(minDim(Cinds)) - - Adata = 
data(Astore) - Bdata = data(Bstore) - Cdata = data(Cstore) - - if(length(Alabels)==0) - _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) - elseif(length(Blabels)==0) - _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) - else - _contract_diag_diag!(Cdata,Clabels,Adata,Alabels,Bdata,Blabels) - end - - return Cstore -end +#function _contract(Cinds::IndexSet, +# Clabels::Vector{Int}, +# Astore::Diag{Vector{SA}}, +# Ainds::IndexSet, +# Alabels::Vector{Int}, +# Bstore::Diag{Vector{SB}}, +# Binds::IndexSet, +# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} +# SC = promote_type(SA,SB) +# +# # Convert the arrays to a common type +# # since we will call BLAS +# Astore = convert(Diag{Vector{SC}},Astore) +# Bstore = convert(Diag{Vector{SC}},Bstore) +# Cstore = Diag{Vector{SC}}(minDim(Cinds)) +# +# Adata = data(Astore) +# Bdata = data(Bstore) +# Cdata = data(Cstore) +# +# if(length(Alabels)==0) +# _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) +# elseif(length(Blabels)==0) +# _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) +# else +# _contract_diag_diag!(Cdata,Clabels,Adata,Alabels,Bdata,Blabels) +# end +# +# return Cstore +#end # Contract function for Diag uniform * Diag uniform -function _contract(Cinds::IndexSet, - Clabels::Vector{Int}, - Astore::Diag{SA}, - Ainds::IndexSet, - Alabels::Vector{Int}, - Bstore::Diag{SB}, - Binds::IndexSet, - Blabels::Vector{Int}) where {SA<:Number,SB<:Number} - SC = promote_type(SA,SB) - - # Convert the arrays to a common type - # since we will call BLAS - Astore = convert(Diag{SC},Astore) - Bstore = convert(Diag{SC},Bstore) - Cstore = Diag{SC}(minDim(Cinds)) - - Adata = data(Astore) - Bdata = data(Bstore) - Cdata = data(Cstore) - - if length(Clabels) == 0 # If all indices of A and B are contracted - # all indices are summed over, just add the product of the diagonal - # elements of A and B - min_dim = minimum(dims(Ainds)) # == length(Bdata) - # Need to set to zero since - # Cdata is originally uninitialized memory - # (so causes random output) - Cdata = zero(SC) - for i = 1:min_dim - Cdata += Adata*Bdata - end - else - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - Cdata = Adata*Bdata - end - - Cstore.data = Cdata - return Cstore -end +#function _contract(Cinds::IndexSet, +# Clabels::Vector{Int}, +# Astore::Diag{SA}, +# Ainds::IndexSet, +# Alabels::Vector{Int}, +# Bstore::Diag{SB}, +# Binds::IndexSet, +# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} +# SC = promote_type(SA,SB) +# +# # Convert the arrays to a common type +# # since we will call BLAS +# Astore = convert(Diag{SC},Astore) +# Bstore = convert(Diag{SC},Bstore) +# Cstore = Diag{SC}(minDim(Cinds)) +# +# Adata = data(Astore) +# Bdata = data(Bstore) +# Cdata = data(Cstore) +# +# if length(Clabels) == 0 # If all indices of A and B are contracted +# # all indices are summed over, just add the product of the diagonal +# # elements of A and B +# min_dim = minimum(dims(Ainds)) # == length(Bdata) +# # Need to set to zero since +# # Cdata is originally uninitialized memory +# # (so causes random output) +# Cdata = zero(SC) +# for i = 1:min_dim +# Cdata += Adata*Bdata +# end +# else +# # not all indices are summed over, set the diagonals of the result +# # to the product of the diagonals of A and B +# Cdata = Adata*Bdata +# end +# +# Cstore.data = Cdata +# return Cstore +#end -function _contract_diag_dense!(Cdata::Array{T,NC},Clabels::Vector{Int}, - Adata::Vector{T},Alabels::Vector{Int}, - 
Bdata::Array{T,NB},Blabels::Vector{Int}) where {T,NB,NC} +function _contract!(C::DenseTensor{ElC,NC},Clabels, + A::DiagTensor{ElA,NA},Alabels, + B::DenseTensor{ElB,NB},Blabels) where {ElA,NA,ElB,NB,ElC,NC} if all(i -> i < 0, Blabels) # If all of B is contracted - min_dim = minimum(size(Bdata)) + # Can rewrite NC+NB==NA + min_dim = minimum(dims(B)) if length(Clabels) == 0 # all indices are summed over, just add the product of the diagonal # elements of A and B for i = 1:min_dim - Cdata[1] += Adata[i]*Bdata[ntuple(_->i,Val(NB))...] + diag_inds_A = CartesianIndex{NA}(ntuple(_->i,Val(NA))) + diag_inds_B = CartesianIndex{NB}(ntuple(_->i,Val(NB))) + C[1] += A[diag_inds_A]*B[diag_inds_B] end else # not all indices are summed over, set the diagonals of the result # to the product of the diagonals of A and B # TODO: should we make this return a Diag storage? for i = 1:min_dim - Cdata[ntuple(_->i,Val(NC))...] = Adata[i]*Bdata[ntuple(_->i,Val(NB))...] + diag_inds_A = CartesianIndex{NA}(ntuple(_->i,Val(NA))) + diag_inds_B = CartesianIndex{NB}(ntuple(_->i,Val(NB))) + diag_inds_C = CartesianIndex{NC}(ntuple(_->i,Val(NC))) + C[diag_inds_C] = A[diag_inds_A]*B[diag_inds_B] end end else @@ -650,8 +713,8 @@ function _contract_diag_dense!(Cdata::Array{T,NC},Clabels::Vector{Int}, for ib = 1:length(Blabels) ia = findfirst(==(Blabels[ib]),Alabels) if(!isnothing(ia)) - b_cstride += stride(Bdata,ib) - bstart += astarts[ia]*stride(Bdata,ib) + b_cstride += stride(B,ib) + bstart += astarts[ia]*stride(B,ib) else nbu += 1 end @@ -661,31 +724,31 @@ function _contract_diag_dense!(Cdata::Array{T,NC},Clabels::Vector{Int}, for ic = 1:length(Clabels) ia = findfirst(==(Clabels[ic]),Alabels) if(!isnothing(ia)) - c_cstride += stride(Cdata,ic) - cstart += astarts[ia]*stride(Cdata,ic) + c_cstride += stride(C,ic) + cstart += astarts[ia]*stride(C,ic) end end # strides of the uncontracted dimensions of - # Bdata + # B bustride = zeros(Int,nbu) custride = zeros(Int,nbu) # size of the uncontracted dimensions of - # Bdata, to be used in CartesianIndices + # B, to be used in CartesianIndices busize = zeros(Int,nbu) n = 1 for ib = 1:length(Blabels) if Blabels[ib] > 0 - bustride[n] = stride(Bdata,ib) - busize[n] = size(Bdata,ib) + bustride[n] = stride(B,ib) + busize[n] = size(B,ib) ic = findfirst(==(Blabels[ib]),Clabels) - custride[n] = stride(Cdata,ic) + custride[n] = stride(C,ic) n += 1 end end - boffset_orig = 1-sum(strides(Bdata)) - coffset_orig = 1-sum(strides(Cdata)) + boffset_orig = 1-sum(strides(B)) + coffset_orig = 1-sum(strides(C)) cartesian_inds = CartesianIndices(Tuple(busize)) for inds in cartesian_inds boffset = boffset_orig @@ -695,12 +758,94 @@ function _contract_diag_dense!(Cdata::Array{T,NC},Clabels::Vector{Int}, boffset += ii*bustride[i] coffset += ii*custride[i] end - for j in 1:length(Adata) - Cdata[cstart+j*c_cstride+coffset] += Adata[j]*Bdata[bstart+j*b_cstride+boffset] + for j in 1:diag_length(A) + diag_inds_A = CartesianIndex{NA}(ntuple(_->j,Val(NA))) + C[cstart+j*c_cstride+coffset] += A[diag_inds_A]*B[bstart+j*b_cstride+boffset] end end end end +_contract!(C::DenseTensor,Clabels,A::DenseTensor,Alabels,B::DiagTensor,Blabels) = _contract!(C,Clabels,B,Blabels,A,Alabels) + +#function _contract_diag_dense!(Cdata::Array{T,NC},Clabels::Vector{Int}, +# Adata::Vector{T},Alabels::Vector{Int}, +# Bdata::Array{T,NB},Blabels::Vector{Int}) where {T,NB,NC} +# if all(i -> i < 0, Blabels) # If all of B is contracted +# min_dim = minimum(size(Bdata)) +# if length(Clabels) == 0 +# # all indices are summed over, just add the product 
of the diagonal +# # elements of A and B +# for i = 1:min_dim +# Cdata[1] += Adata[i]*Bdata[ntuple(_->i,Val(NB))...] +# end +# else +# # not all indices are summed over, set the diagonals of the result +# # to the product of the diagonals of A and B +# # TODO: should we make this return a Diag storage? +# for i = 1:min_dim +# Cdata[ntuple(_->i,Val(NC))...] = Adata[i]*Bdata[ntuple(_->i,Val(NB))...] +# end +# end +# else +# astarts = zeros(Int,length(Alabels)) +# bstart = 0 +# cstart = 0 +# b_cstride = 0 +# nbu = 0 +# for ib = 1:length(Blabels) +# ia = findfirst(==(Blabels[ib]),Alabels) +# if(!isnothing(ia)) +# b_cstride += stride(Bdata,ib) +# bstart += astarts[ia]*stride(Bdata,ib) +# else +# nbu += 1 +# end +# end +# +# c_cstride = 0 +# for ic = 1:length(Clabels) +# ia = findfirst(==(Clabels[ic]),Alabels) +# if(!isnothing(ia)) +# c_cstride += stride(Cdata,ic) +# cstart += astarts[ia]*stride(Cdata,ic) +# end +# end +# +# # strides of the uncontracted dimensions of +# # Bdata +# bustride = zeros(Int,nbu) +# custride = zeros(Int,nbu) +# # size of the uncontracted dimensions of +# # Bdata, to be used in CartesianIndices +# busize = zeros(Int,nbu) +# n = 1 +# for ib = 1:length(Blabels) +# if Blabels[ib] > 0 +# bustride[n] = stride(Bdata,ib) +# busize[n] = size(Bdata,ib) +# ic = findfirst(==(Blabels[ib]),Clabels) +# custride[n] = stride(Cdata,ic) +# n += 1 +# end +# end +# +# boffset_orig = 1-sum(strides(Bdata)) +# coffset_orig = 1-sum(strides(Cdata)) +# cartesian_inds = CartesianIndices(Tuple(busize)) +# for inds in cartesian_inds +# boffset = boffset_orig +# coffset = coffset_orig +# for i in 1:nbu +# ii = inds[i] +# boffset += ii*bustride[i] +# coffset += ii*custride[i] +# end +# for j in 1:length(Adata) +# Cdata[cstart+j*c_cstride+coffset] += Adata[j]*Bdata[bstart+j*b_cstride+boffset] +# end +# end +# end +#end function _contract_diag_dense!(Cstore::Dense,Cinds,Clabels::Vector{Int}, Astore::Diag{TA},Ainds,Alabels::Vector{Int}, diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index 654c7d9c8a..d09d234cf4 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -30,6 +30,11 @@ dim(ds::Dims) = prod(ds) Base.strides(is::Dims) = Base.size_to_strides(1, dims(is)...) 
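# A minimal usage sketch of the Dims helpers here (hypothetical values, assuming
# the ValLength overloads added just below): recovering the length of a Dims
# tuple as a compile-time Val lets downstream code call ntuple type-stably.
#   ds = (2,3,4)                        # ds isa Dims{3}
#   ValLength(ds) === Val(3)            # length available at compile time
#   ntuple(i->ds[i]+1, ValLength(ds))   # == (3,4,5), no dynamic length lookup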
Base.copy(ds::Dims) = ds
+Base.promote_rule(::Type{<:Dims},::Type{Val{N}}) where {N} = Dims{N}
+
+ValLength(::Type{Dims{N}}) where {N} = Val{N}
+ValLength(::Dims{N}) where {N} = Val(N)
+
# This is to help with some generic programming in the Tensor
# code (it helps to construct a Tuple(::NTuple{N,Int}) where the
# only known thing for dispatch is a concrete type such
@@ -68,10 +73,12 @@ Base.similar(T::Tensor,::Type{S},dims::Dims) where {S} = Tensor(similar(store(T)
#  return Tensor(convert(StoreR,store(T)),copy(inds(T)))
#end
-function Base.promote_rule(::Type{TensorT1},
-                           ::Type{TensorT2}) where
-  {TensorT1<:Tensor{ElT1,N1,StoreT1},TensorT2<:Tensor{ElT2,N2,StoreT2}} where
-  {ElT1,ElT2,N1,N2,StoreT1,StoreT2}
+function Base.zeros(::Type{<:Tensor{ElT,<:Any,StoreT,<:Any}},inds::IndsT) where {ElT,StoreT,IndsT}
+  return Tensor(zeros(StoreT,dim(inds)),inds)
+end
+
+function Base.promote_rule(::Type{<:Tensor{ElT1,N1,StoreT1}},
+                           ::Type{<:Tensor{ElT2,N2,StoreT2}}) where {ElT1,ElT2,N1,N2,StoreT1,StoreT2}
  return Tensor{promote_type(ElT1,ElT2),N3,promote_type(StoreT1,StoreT2)} where {N3}
end
@@ -81,6 +88,14 @@ function Base.promote_rule(::Type{Tensor{ElT1,N,StoreT1,Inds}},
  return Tensor{promote_type(ElT1,ElT2),N,promote_type(StoreT1,StoreT2),Inds}
end
+#function Base.promote_rule(::Type{<:Tensor{ElT,<:Any,StoreT,<:Any}},::Type{IndsR}) where {N,ElT,StoreT,IndsR}
+#  return Tensor{ElT,length(IndsR),StoreT,IndsR}
+#end
+
+function StaticArrays.similar_type(::Type{<:Tensor{ElT,<:Any,StoreT,<:Any}},indsR) where {ElT,StoreT}
+  return Tensor{ElT,length(indsR),StoreT,typeof(indsR)}
+end
+
Base.BroadcastStyle(::Type{T}) where {T<:Tensor} = Broadcast.ArrayStyle{T}()
function Base.similar(bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{T}},
diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl
index 19ceafe2e2..bf786f54f1 100644
--- a/test/test_itensor_dense.jl
+++ b/test/test_itensor_dense.jl
@@ -454,6 +454,7 @@ end
  jj = Index(4)
  T = randomITensor(ComplexF64,ii,jj)
  U,S,V = svd(T,ii;maxdim=2)
+  u,s,v = svd(Matrix(T))
  @test norm(U*S*V-T)≈sqrt(s[3]^2+s[4]^2)
end
@@ -488,7 +489,7 @@ end
  T = randomITensor(is...,prime(is)...)
  T = T + swapprime(dag(T),0,1)
  U,D,u = eigenHermitian(T)
-  @test A≈U*D*prime(dag(U))
+  @test T ≈ U*D*prime(dag(U))
  UUᴴ = U*prime(dag(U),u)
  @test UUᴴ ≈ δ(u,u') atol=1e-14
end

From 431db0ef4375d3a92212e51524ef34581eba2808 Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Tue, 17 Sep 2019 18:40:59 -0400
Subject: [PATCH 13/27] Dense and Diag tests pass, starting on Combiner

---
 src/decomp.jl                   |  20 +-
 src/indexset.jl                 |  18 +-
 src/itensor.jl                  |  33 +-
 src/mps/mps.jl                  |   9 +-
 src/tensor/combiner.jl          | 237 ++++++----
 src/tensor/contraction_logic.jl |   4 +-
 src/tensor/dense.jl             | 162 ++++---
 src/tensor/diag.jl              | 753 +++++---------------------------
 test/runtests.jl                |   4 +-
 test/test_combiner.jl           |  36 +-
 test/test_ctmrg.jl              |  37 +-
 test/test_indexset.jl           |  20 +-
 12 files changed, 473 insertions(+), 860 deletions(-)

diff --git a/src/decomp.jl b/src/decomp.jl
index 7add719561..9fc8b18ae4 100644
--- a/src/decomp.jl
+++ b/src/decomp.jl
@@ -122,7 +122,7 @@ function _factorize_from_left_eigen(A::ITensor,
                                    kwargs...)
  Lis = commoninds(inds(A),IndexSet(Linds...))
  A² = A*prime(dag(A),Lis)
-  FU,D = eigen(A²,Lis,prime(Lis);kwargs...)
+  FU,D = eigenHermitian(A²,Lis,prime(Lis);kwargs...)
  FV = dag(FU)*A
  return FU,FV,commonindex(FU,FV)
end
@@ -132,7 +132,7 @@ function _factorize_from_right_eigen(A::ITensor,
                                     kwargs...)
Ris = uniqueinds(inds(A),IndexSet(Linds...)) A² = A*prime(dag(A),Ris) - FV,D = eigen(A²,Ris,prime(Ris); kwargs...) + FV,D = eigenHermitian(A²,Ris,prime(Ris); kwargs...) FU = A*dag(FV) return FU,FV,commonindex(FU,FV) end @@ -177,18 +177,20 @@ function eigenHermitian(A::ITensor, Rinds=prime(IndexSet(Linds)); kwargs...) tags::TagSet = get(kwargs,:tags,"Link,eigen") + lefttags::TagSet = get(kwargs,:lefttags,tags) + righttags::TagSet = get(kwargs,:righttags,prime(tags)) Lis = commoninds(inds(A),IndexSet(Linds...)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) - UT,DT = eigenHermitian(Tensor(A),Lpos,Rpos) + UT,DT = eigenHermitian(Tensor(A),Lpos,Rpos;kwargs...) U,D = ITensor(UT),ITensor(DT) u = commonindex(U,D) - settags!(U,tags,u) - settags!(D,tags,u) - u = settags(u,tags) + settags!(U,lefttags,u) + settags!(D,lefttags,u) + u = settags(u,lefttags) v = uniqueindex(D,U) - D *= δ(v,u') - return U,D,u + D *= δ(v,settags(u,righttags)) + return U,D,u,v end import LinearAlgebra.eigen @@ -200,7 +202,7 @@ function eigen(A::ITensor, Lis = commoninds(inds(A),IndexSet(Linds...)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) - UT,DT = eigen(Tensor(A),Lpos,Rpos) + UT,DT = eigen(Tensor(A),Lpos,Rpos;kwargs...) U,D = ITensor(UT),ITensor(DT) u = commonindex(U,D) settags!(U,tags,u) diff --git a/src/indexset.jl b/src/indexset.jl index f7ea219d95..ee0403a78d 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -4,6 +4,7 @@ export IndexSet, hassameinds, findindex, findinds, + indexposition, swaptags, swaptags!, swapprime, @@ -235,7 +236,7 @@ function uniqueindex(Ainds,Binds) for j ∈ Ais _is_unique_index(j,Binds) && return j end - return Index() + return nothing end # This version can check for repeats, but is a bit # slower because of IndexSet allocation @@ -269,7 +270,7 @@ function commonindex(Ainds,Binds) for i ∈ Ais hasindex(Binds,i) && return i end - return Index() + return nothing end # This version checks if there are more than one indices #commonindex(Ais,Bis) = Index(commoninds(Ais,Bis)) @@ -307,7 +308,7 @@ function findindex(inds,tags) end end # TODO: should this return `nothing` if no Index is found? - return Index() + return nothing end # This version checks if there are more than one indices #findindex(inds, tags) = Index(findinds(inds,tags)) @@ -315,13 +316,13 @@ end # TODO: Should this return `nothing` like `findfirst`? # Should this just use `findfirst`? function indexposition(is::IndexSet, - i::Index)::Int + i::Index) for (n,j) in enumerate(is) if i==j return n end end - return 0 + return nothing end # From a tag set or index set, find the positions @@ -484,11 +485,12 @@ swaptags(is, vargs...) = swaptags!(copy(is), vargs...) # Helper functions for contracting ITensors # -function compute_contraction_labels(Ai::IndexSet{N1},Bi::IndexSet{N2}) where {N1,N2} +function compute_contraction_labels(Ai::IndexSet{N1}, + Bi::IndexSet{N2}) where {N1,N2} rA = length(Ai) rB = length(Bi) - Aind = MVector{N1}(ntuple(_->0,Val(N1))) - Bind = MVector{N2}(ntuple(_->0,Val(N2))) + Aind = MVector{N1,Int}(ntuple(_->0,Val(N1))) + Bind = MVector{N2,Int}(ntuple(_->0,Val(N2))) ncont = 0 for i = 1:rA, j = 1:rB diff --git a/src/itensor.jl b/src/itensor.jl index eea55cc907..858fbd6f9b 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -245,6 +245,7 @@ isNull(T::ITensor) = (eltype(T) === Nothing) Base.copy(T::ITensor{N}) where {N} = ITensor{N}(copy(Tensor(T)))::ITensor{N} # TODO: make versions where the element type can be specified +# Should this be array? 
(Version that makes a view) Base.Array(T::ITensor) = Array(Tensor(T)) Base.Matrix(T::ITensor{N}) where {N} = (N==2 ? Array(Tensor(T)) : throw(DimensionMismatch("ITensor must be order 2 to convert to a Matrix"))) @@ -353,7 +354,7 @@ replacetags!(A::ITensor,vargs...) = ( replacetags!(inds(A),vargs...); return A ) replacetags(A::ITensor,vargs...) = ITensor(store(A),replacetags(inds(A),vargs...)) settags!(A::ITensor,vargs...) = ( settags!(inds(A),vargs...); return A ) -settags(A::ITensor,vargs...) = ITensor(store(A),settags(inds(A))) +settags(A::ITensor,vargs...) = ITensor(store(A),settags(inds(A),vargs...)) swaptags(A::ITensor,vargs...) = ITensor(store(A),swaptags(inds(A),vargs...)) @@ -394,11 +395,11 @@ function combiner(inds::IndexSet; kwargs...) tags = get(kwargs, :tags, "CMB,Link") new_ind = Index(prod(dims(inds)), tags) new_is = IndexSet(new_ind, inds) - return ITensor(new_is, CombinerStorage(new_ind)) + return ITensor(Combiner(),new_is),new_ind end combiner(inds::Index...; kwargs...) = combiner(IndexSet(inds...); kwargs...) -combinedindex(T::ITensor) = store(T) isa CombinerStorage ? store(T).ci : Index() +combinedindex(T::ITensor) = store(T) isa Combiner ? store(T).ci : nothing LinearAlgebra.norm(T::ITensor) = norm(Tensor(T)) @@ -484,16 +485,7 @@ B .+= α .* A add!(R::ITensor,T::ITensor) = add!(R,1,T) function add!(R::ITensor{N},α::Number,T::ITensor{N}) where {N} - perm = getperm(inds(R),inds(T)) - TR,TT = Tensor(R),Tensor(T) - - # Include type promotion from α - TR = convert(promote_type(typeof(TR),typeof(TT)),TR) - TR = permutedims!!(TR,TT,perm,(r,t)->r+α*t) - - setstore!(R,store(TR)) - setinds!(R,inds(TR)) - return R + return apply!(R,T,(r,t)->r+α*t) end """ @@ -505,9 +497,20 @@ A .= α .* A .+ β .* B ``` """ function add!(R::ITensor{N},αr::Number,αt::Number,T::ITensor{N}) where {N} + return apply!(R,T,(r,t)->αr*r+αt*t) +end + +function apply!(R::ITensor{N},T::ITensor{N},f::Function) where {N} perm = getperm(inds(R),inds(T)) - TR = permutedims!(Tensor(R),Tensor(T),perm,(r,t)->αr*r+αt*t) - return ITensor(TR) + TR,TT = Tensor(R),Tensor(T) + + # TODO: Include type promotion from α + TR = convert(promote_type(typeof(TR),typeof(TT)),TR) + TR = permutedims!!(TR,TT,perm,f) + + setstore!(R,store(TR)) + setinds!(R,inds(TR)) + return R end """ diff --git a/src/mps/mps.jl b/src/mps/mps.jl index a02667d47b..899d79ab4a 100644 --- a/src/mps/mps.jl +++ b/src/mps/mps.jl @@ -125,7 +125,7 @@ function linkindex(M::MPS,j::Integer) N = length(M) j ≥ length(M) && error("No link index to the right of site $j (length of MPS is $N)") li = commonindex(M[j],M[j+1]) - if isdefault(li) + if isnothing(li) error("linkindex: no MPS link index at link $j") end return li @@ -144,12 +144,7 @@ function siteindex(M::MPS,j::Integer) end function siteinds(M::MPS) - N = length(M) - is = IndexSet(N) - for j in eachindex(M) - is[j] = siteindex(M,j) - end - return is + return [siteindex(M,j) for j in 1:length(M)] end function replacesites!(M::MPS,sites) diff --git a/src/tensor/combiner.jl b/src/tensor/combiner.jl index f001e105c7..157346d625 100644 --- a/src/tensor/combiner.jl +++ b/src/tensor/combiner.jl @@ -1,81 +1,172 @@ export Combiner -struct CombinerStorage <: TensorStorage - ci::Index +struct Combiner <: TensorStorage end -function storage_contract(CSstore::CombinerStorage, - Cis::IndexSet, - Dstore::Dense, - dis::IndexSet) - cind = Cis[1] - if hasindex(dis, cind) # has combined index, uncombine - cpos = indexposition(dis,cind) - dinds = inds(dis) - Cinds = inds(Cis) - Nis = IndexSet(vcat(dinds[1:cpos-1], Cinds[2:end], 
dinds[cpos+1:end])) - return Nis, Dstore - else # lacks combined index, combine - # dis doesn't have cind, replace - # Cis[1], Cis[2], ... with cind, may need to permute - j_1_pos = indexposition(dis,Cis[2]) - if j_1_pos === nothing - throw(ArgumentError("tensor missing index $(Cis[2]) in combiner-tensor product")) - end - - # Check if Cis[2], Cis[3], ... are grouped together (contiguous) - # and in same order as on combiner - contig_sameord = true - j = j_1_pos+1 - for c=3:length(Cis) - if j > length(dis) || (dis[j] != Cis[c]) - contig_sameord = false - break - end - j += 1 - end - - if contig_sameord - lend = j_1_pos-1 - rstart = j_1_pos+length(Cis)-1 - dinds = inds(dis) - Nis = IndexSet(vcat(dinds[1:lend], cind, dinds[rstart:end])) - else # permutation required - # Set P destination values to -1 to mark indices that need to be assigned destinations: - P = fill(-1, length(dis)) - # permute combined indices to the front, in same order as in Cis: - ni = 1 - for c in 2:length(Cis) - j = indexposition(dis, Cis[c]) - if j === nothing - throw(ArgumentError("tensor missing index $(Cis[c]) in combiner-tensor product")) - end - P[j] = ni - ni += 1 - end - - Nis = IndexSet(length(dis)+2-length(Cis)) - Nis[1] = cind - i = 2 - for j in 1:length(dis) - if P[j] == -1 - P[j] = ni - ni += 1 - Nis[i] = dis[j] - i += 1 - end - end - - ddata = vec(permutedims(reshape(data(Dstore), dims(dis)), invperm(P))) - Dstore = Dense{eltype(Dstore)}(ddata) - end - return Nis, Dstore +Base.eltype(::Combiner) = Nothing + +Base.promote_rule(::Type{<:Combiner},StorageT::Type{<:Dense}) = StorageT + +# +# CombinerTensor (Tensor using Dense storage) +# + +const CombinerTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Combiner} + +function contraction_output_type(TensorT1::Type{<:CombinerTensor}, + TensorT2::Type{<:DenseTensor}, + indsR) + return similar_type(promote_type(TensorT1,TensorT2),indsR) +end + +function contraction_output_type(TensorT1::Type{<:DenseTensor}, + TensorT2::Type{<:CombinerTensor}, + indsR) + return contraction_output_type(TensorT2,TensorT1,indsR) +end + +function contraction_output(TensorT1::Type{<:CombinerTensor}, + TensorT2::Type{<:DenseTensor}, + indsR) + return similar(contraction_output_type(TensorT1,TensorT2,indsR),indsR) +end + +function contraction_output(TensorT1::Type{<:DenseTensor}, + TensorT2::Type{<:CombinerTensor}, + indsR) + return contraction_output(TensorT2,TensorT1,indsR) +end + +function count_unique(labelsT1,labelsT2) + count = 0 + for l1 ∈ labelsT1 + l1 ∉ labelsT2 && (count += 1) + end + return count +end + +function count_common(labelsT1,labelsT2) + count = 0 + for l1 ∈ labelsT1 + l1 ∈ labelsT2 && (count += 1) + end + return count +end + +function contract!!(R::Tensor{<:Number,NR}, + labelsR::NTuple{NR}, + T1::CombinerTensor{<:Any,N1}, + labelsT1::NTuple{N1}, + T2::Tensor{<:Number,N2}, + labelsT2::NTuple{N2}) where {NR,N1,N2} + @show labelsT1 + @show labelsT2 + @show count_unique(labelsT1,labelsT2) + @show count_common(labelsT1,labelsT2) + + if N1 ≤ 1 + println("identity") + return R + elseif N1 + N2 == NR + error("Cannot perform outer product involving a combiner") + elseif N1 == 2 && count_unique(labelsT1,labelsT2) == 1 && + count_common(labelsT1,labelsT2) == 1 + # This is the case of index replacement + println("index replacement") + elseif count_unique(labelsT1,labelsT2) > 1 && + count_common(labelsT1,labelsT2) == 1 + # This is the case of uncombining + println("uncombining") + elseif count_unique(labelsT1,labelsT2) == 1 && + 
count_common(labelsT1,labelsT2) > 1 + # This is the case of combining + println("combining") end + return R end -function storage_contract(Dstore::Dense, - dis::IndexSet, - CSstore::CombinerStorage, - Cis::IndexSet) - return storage_contract(CSstore, Cis, Dstore, dis) +function contract!!(R::Tensor{<:Number,NR}, + labelsR::NTuple{NR}, + T1::Tensor{<:Number,N1}, + labelsT1::NTuple{N1}, + T2::CombinerTensor{<:Any,N2}, + labelsT2::NTuple{N2}) where {NR,N1,N2} + return contract!!(R,labelsR,T2,labelsT2,T1,labelsT1) end + +#function storage_contract(CSstore::CombinerStorage, +# Cis::IndexSet, +# Dstore::Dense, +# dis::IndexSet) +# cind = Cis[1] +# if hasindex(dis, cind) # has combined index, uncombine +# cpos = indexposition(dis,cind) +# dinds = inds(dis) +# Cinds = inds(Cis) +# Nis = IndexSet(vcat(dinds[1:cpos-1], Cinds[2:end], dinds[cpos+1:end])) +# return Nis, Dstore +# else # lacks combined index, combine +# # dis doesn't have cind, replace +# # Cis[1], Cis[2], ... with cind, may need to permute +# j_1_pos = indexposition(dis,Cis[2]) +# if isnothing(j_1_pos) +# throw(ArgumentError("tensor missing index $(Cis[2]) in combiner-tensor product")) +# end +# +# # Check if Cis[2], Cis[3], ... are grouped together (contiguous) +# # and in same order as on combiner +# contig_sameord = true +# j = j_1_pos+1 +# for c=3:length(Cis) +# if j > length(dis) || (dis[j] != Cis[c]) +# contig_sameord = false +# break +# end +# j += 1 +# end +# +# if contig_sameord +# lend = j_1_pos-1 +# rstart = j_1_pos+length(Cis)-1 +# dinds = inds(dis) +# Nis = IndexSet(vcat(dinds[1:lend], cind, dinds[rstart:end])) +# else # permutation required +# # Set P destination values to -1 to mark indices that need to be assigned destinations: +# P = fill(-1, length(dis)) +# # permute combined indices to the front, in same order as in Cis: +# ni = 1 +# for c in 2:length(Cis) +# j = indexposition(dis, Cis[c]) +# if isnothing(j) +# throw(ArgumentError("tensor missing index $(Cis[c]) in combiner-tensor product")) +# end +# P[j] = ni +# ni += 1 +# end +# +# Nis = IndexSet(length(dis)+2-length(Cis)) +# Nis[1] = cind +# i = 2 +# for j in 1:length(dis) +# if P[j] == -1 +# P[j] = ni +# ni += 1 +# Nis[i] = dis[j] +# i += 1 +# end +# end +# +# ddata = vec(permutedims(reshape(data(Dstore), dims(dis)), invperm(P))) +# Dstore = Dense{eltype(Dstore)}(ddata) +# end +# return Nis, Dstore +# end +#end +# +#function storage_contract(Dstore::Dense, +# dis::IndexSet, +# CSstore::CombinerStorage, +# Cis::IndexSet) +# return storage_contract(CSstore, Cis, Dstore, dis) +#end + diff --git a/src/tensor/contraction_logic.jl b/src/tensor/contraction_logic.jl index 013e76a274..34e741097d 100644 --- a/src/tensor/contraction_logic.jl +++ b/src/tensor/contraction_logic.jl @@ -242,7 +242,7 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) for k = 1:rc #j = find_index(props.ai,props.ci[k]) j = findfirst(==(props.ci[k]),props.ai) - if j!==nothing + if !isnothing(j) props.AtoC[newi] = k props.PA[newi+1] = j newi += 1 @@ -304,7 +304,7 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) for k = 1:rc #j = find_index(props.bi,props.ci[k]) j = findfirst(==(props.ci[k]),props.bi) - if j!==nothing + if !isnothing(j) props.BtoC[newi] = k props.PB[newi+1] = j newi += 1 diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 7635659179..74c8a02df3 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -42,8 +42,16 @@ Base.eltype(::Dense{T}) where {T} = eltype(T) Base.eltype(::Dense{Nothing}) = Nothing 
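# A small sketch of the Dense storage promotion and scaling rules collected
# here (hypothetical values; assumes the Dense{T}(::Vector) constructor used
# above, with the promote_rule and * definitions just below):
#   D = Dense{Float64}([1.0,2.0])
#   promote_type(Dense{Float64},Dense{ComplexF64}) == Dense{ComplexF64}
#   data(2im*D) == [2.0im,4.0im]    # *(x,D) promotes the element type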
Base.eltype(::Type{Dense{T}}) where {T} = eltype(T) -Base.promote_rule(::Type{Dense{T1}},::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} -Base.convert(::Type{Dense{R}},D::Dense) where {R} = Dense{R}(convert(Vector{R},data(D))) +Base.promote_rule(::Type{Dense{T1}}, + ::Type{Dense{T2}}) where {T1,T2} = Dense{promote_type(T1,T2)} +Base.convert(::Type{Dense{R}}, + D::Dense) where {R} = Dense{R}(convert(Vector{R},data(D))) + +function Base.:*(D::Dense{<:El},x::S) where {El<:Number,S<:Number} + return Dense{promote_type(El,S)}(x*data(D)) +end + +Base.:*(x::Number,D::Dense) = D*x # # DenseTensor (Tensor using Dense storage) @@ -56,6 +64,16 @@ Base.IndexStyle(::Type{TensorT}) where {TensorT<:DenseTensor} = IndexLinear() Base.getindex(T::DenseTensor,i::Integer) = store(T)[i] Base.setindex!(T::DenseTensor,v,i::Integer) = (store(T)[i] = v) +# Get the specified value on the diagonal +function getdiag(T::DenseTensor{<:Number,N},ind::Int) where {N} + return T[CartesianIndex(ntuple(_->ind,Val(N)))] +end + +# Set the specified value on the diagonal +function setdiag!(T::DenseTensor{<:Number,N},val,ind::Int) where {N} + T[CartesianIndex(ntuple(_->ind,Val(N)))] = val +end + Base.strides(T::DenseTensor) = strides(inds(T)) # Needed for passing Tensor{T,2} to BLAS @@ -63,9 +81,9 @@ function Base.unsafe_convert(::Type{Ptr{ElT}},T::DenseTensor{ElT}) where {ElT} return Base.unsafe_convert(Ptr{ElT},data(store(T))) end -function Base.convert(::Type{TensorR}, - T::DenseTensor{ElT,N}) where {TensorR<:Tensor{ElR,N,StoreR}} where - {ElR,ElT,N,StoreR<:Dense} +# Convert a Dense Tensor to a Tensor with the specified storage +function Base.convert(::Type{<:Tensor{<:Any,<:Any,StoreR}}, + T::DenseTensor) where {StoreR} return Tensor(convert(StoreR,store(T)),inds(T)) end @@ -100,9 +118,11 @@ end # Version that may overwrite the result or promote # and return the result -function permutedims!!(R::DenseTensor{<:Number,N}, - T::DenseTensor{<:Number,N}, - perm::NTuple{N,Int},f=(r,t)->t) where {N} +# TODO: move to tensor.jl? +function permutedims!!(R::Tensor, + T::Tensor, + perm::NTuple{N,Int}, + f=(r,t)->t) where {N} permutedims!(R,T,perm,f) return R end @@ -111,10 +131,17 @@ end function Base.permutedims(T::Tensor{<:Number,N}, perm::NTuple{N,Int}) where {N} Tp = similar(T,permute(inds(T),perm)) - permutedims!(Tp,T,perm) + Tp = permutedims!!(Tp,T,perm) return Tp end +# TODO: move to tensor.jl? +function Base.:*(x::Number, + T::Tensor) + return Tensor(x*store(T),inds(T)) +end +Base.:*(T::Tensor, x::Number) = x*T + # For use in custom permutedims! using Base.Cartesian: @nexprs, @ntuple, @@ -165,18 +192,26 @@ function Base.permutedims!(TP::DenseTensor{<:Number,0}, return TP end -function outer!(R::DenseTensor,T1::DenseTensor,T2::DenseTensor) - # TODO: make vec(::Tensor) work? Would this be a view? What - # happens to sparse tensors like Diag? 
+function outer!(R::DenseTensor, + T1::DenseTensor, + T2::DenseTensor) v1 = vec(T1) v2 = vec(T2) - R .= zero(ElR) RM = reshape(R,dim(v1),dim(v2)) - BLAS.ger!(one(ElR),v1,v2,R) + RM .= v1 .* transpose(v2) return R end -function outer(T1::DenseTensor{ElT1},T2::DenseTensor{ElT2}) where {ElT1,ElT2} +function outer!!(R::Tensor, + T1::Tensor, + T2::Tensor) + outer!(R,T1,T2) + return R +end + +# TODO: call outer!!, make this generic +function outer(T1::DenseTensor{ElT1}, + T2::DenseTensor{ElT2}) where {ElT1,ElT2} array_outer = vec(Array(T1))*transpose(vec(Array(T2))) inds_outer = unioninds(inds(T1),inds(T2)) return Tensor(Dense{promote_type(ElT1,ElT2)}(vec(array_outer)),inds_outer) @@ -196,67 +231,86 @@ function contraction_output(TensorT1::Type{<:DenseTensor}, end # TODO: move to tensor.jl? -function contract(T1::Tensor{<:Number,N1},labelsT1, - T2::Tensor{<:Number,N2},labelsT2) where {N1,N2} +function contract(T1::Tensor{<:Any,N1}, + labelsT1, + T2::Tensor{<:Any,N2}, + labelsT2) where {N1,N2} indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) - #if N1+N2==length(indsR) - # R = T1⊗T2 - #else R = contraction_output(typeof(T1),typeof(T2),indsR) - #R = similar(contraction_output_type(typeof(T1),typeof(T2),Val{length(indsR)}),indsR) - #R = similar(promote_type(typeof(T1),typeof(T2)),indsR) - contract!(R,labelsR,T1,labelsT1,T2,labelsT2) - #end + # contract!! version here since the output R may not + # be mutable (like UniformDiag) + R = contract!!(R,labelsR,T1,labelsT1,T2,labelsT2) return R end -function contract!(R::Tensor{<:Number,NR},labelsR::NTuple{NR,Int}, - T1::Tensor{<:Number,N1},labelsT1::NTuple{N1,Int}, - T2::Tensor{<:Number,N2},labelsT2::NTuple{N2,Int}) where {NR,N1,N2} +# Move to tensor.jl? Is this generic for all storage types? +function contract!!(R::Tensor{<:Number,NR}, + labelsR::NTuple{NR}, + T1::Tensor{<:Number,N1}, + labelsT1::NTuple{N1}, + T2::Tensor{<:Number,N2}, + labelsT2::NTuple{N2}) where {NR,N1,N2} if N1==0 # TODO: replace with an add! function? # What about doing `R .= T1[] .* PermutedDimsArray(T2,perm)`? perm = getperm(labelsR,labelsT2) - permutedims!(R,T2,perm,(r,t2)->T1[]*t2) + R = permutedims!!(R,T2,perm,(r,t2)->T1[]*t2) elseif N2==0 perm = getperm(labelsR,labelsT1) - permutedims!(R,T1,perm,(r,t1)->T2[]*t1) + R = permutedims!!(R,T1,perm,(r,t1)->T2[]*t1) elseif N1+N2==NR # TODO: permute T1 and T2 appropriately first (can be more efficient # then permuting the result of T1⊗T2) # TODO: implement the in-place version directly - outer!(R,T1,T2) + R = outer!!(R,T1,T2) labelsRp = unioninds(labelsT1,labelsT2) perm = getperm(labelsR,labelsRp) if !is_trivial_permutation(perm) - permutedims!(R,copy(R),perm) + R = permutedims!!(R,copy(R),perm) end else - _contract!(R,labelsR,T1,labelsT1,T2,labelsT2) + R = _contract!!(R,labelsR,T1,labelsT1,T2,labelsT2) end return R end Base.copyto!(R::Tensor,T::Tensor) = copyto!(store(R),store(T)) +# Move to tensor.jl? 
Overload this function +# for immutable storage types +function _contract!!(R::Tensor,labelsR, + T1::Tensor,labelsT1, + T2::Tensor,labelsT2) + _contract!(R,labelsR,T1,labelsT1,T2,labelsT2) + return R +end + # TODO: make sure this is doing type promotion correctly # since we are calling BLAS (need to promote T1 and T2 to # the same types) -function _contract!(R::DenseTensor,labelsR, - T1::DenseTensor,labelsT1, - T2::DenseTensor,labelsT2) +function _contract!(R::DenseTensor, + labelsR, + T1::Tensor{<:Number,<:Any,StoreT1}, + labelsT1, + T2::Tensor{<:Number,<:Any,StoreT2}, + labelsT2) where {StoreT1<:Dense,StoreT2<:Dense} props = ContractionProperties(labelsT1,labelsT2,labelsR) compute_contraction_properties!(props,T1,T2,R) + + # We do type promotion here for BLAS (to ensure + # we contract DenseComplex*DenseComplex) + if StoreT1 !== StoreT2 + T1,T2 = promote(T1,T2) + end + _contract!(R,T1,T2,props) return R end -function _contract!(C::DenseTensor{T,NC}, - A::DenseTensor{T,NA}, - B::DenseTensor{T,NB}, - props::ContractionProperties, - α::T=one(T), - β::T=zero(T)) where {T,NC,NA,NB} +function _contract!(C::DenseTensor{El,NC}, + A::DenseTensor{El,NA}, + B::DenseTensor{El,NB}, + props::ContractionProperties) where {El,NC,NA,NB} tA = 'N' if props.permuteA aref = reshape(permutedims(A,NTuple{NA,Int}(props.PA)),props.dmid,props.dleft) @@ -285,6 +339,8 @@ function _contract!(C::DenseTensor{T,NC}, # TODO: this logic may be wrong if props.permuteC + # Need to copy here since we will be permuting + # into C later cref = reshape(copy(C),props.dleft,props.dright) else if Ctrans(props) @@ -305,7 +361,7 @@ function _contract!(C::DenseTensor{T,NC}, # TODO: make sure this is fast with Tensor{ElT,2}, or # convert aref and bref to Matrix - BLAS.gemm!(tA,tB,α,aref,bref,β,cref) + BLAS.gemm!(tA,tB,one(El),aref,bref,zero(El),cref) if props.permuteC permutedims!(C,reshape(cref,props.newCrange...),NTuple{NC,Int}(props.PC)) @@ -445,14 +501,12 @@ end function eigenHermitian(T::DenseTensor{ElT,2,IndsT}; kwargs...) 
where {ElT,IndsT}
+  truncate::Bool = get(kwargs,:truncate,false)
  maxdim::Int = get(kwargs,:maxdim,minimum(dims(T)))
  mindim::Int = get(kwargs,:mindim,1)
  cutoff::Float64 = get(kwargs,:cutoff,0.0)
  absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false)
  doRelCutoff::Bool = get(kwargs,:doRelCutoff,true)
-  #tags::TagSet = get(kwargs,:lefttags,"Link,u")
-  #lefttags::TagSet = get(kwargs,:lefttags,tags)
-  #righttags::TagSet = get(kwargs,:righttags,prime(lefttags))
  DM,UM = eigen(Hermitian(Matrix(T)))
@@ -461,13 +515,17 @@ function eigenHermitian(T::DenseTensor{ElT,2,IndsT};
  DM = DM[p]
  UM = UM[:,p]
-  truncate!(DM;maxdim=maxdim,
-            cutoff=cutoff,
-            absoluteCutoff=absoluteCutoff,
-            doRelCutoff=doRelCutoff)
-  dD = length(DM)
-  if dD < size(UM,2)
-    UM = UM[:,1:dD]
+  if truncate
+    truncate!(DM;maxdim=maxdim,
+              cutoff=cutoff,
+              absoluteCutoff=absoluteCutoff,
+              doRelCutoff=doRelCutoff)
+    dD = length(DM)
+    if dD < size(UM,2)
+      UM = UM[:,1:dD]
+    end
+  else
+    dD = length(DM)
  end
  # Make the new indices to go onto U and V
diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl
index 2faf6bd213..a86bd5c09a 100644
--- a/src/tensor/diag.jl
+++ b/src/tensor/diag.jl
@@ -6,13 +6,6 @@ export Diag
struct Diag{T} <: TensorStorage
  data::T
  Diag{T}(data) where {T} = new{T}(data)
-  #Diag{T}(data) where {T<:Number} = new{T}(convert(T,x))
-  #Diag{T}(size::Integer) where {T<:AbstractVector{S}} where S = new{T}(Vector{S}(undef,size))
-  #Diag{T}(x::Number,size::Integer) where {T<:AbstractVector} = new{T}(fill(convert(eltype(T),x),size))
-  #Diag{T}() where {T} = new{T}(Vector{T}())
-  # Make a uniform Diag storage
-  # Determine the storage type parameter from the input
-  #Diag(data::T) where T = new{T}(data)
end
data(D::Diag) = D.data
@@ -39,11 +41,20 @@ Base.similar(D::UniformDiag{T}) where {T<:Number} = Diag{T}(zero(T))
Base.similar(::Type{<:UniformDiag{T}},inds) where {T<:Number} = Diag{T}(zero(T))
# TODO: make this work for other storage besides Vector
-Base.zeros(::Type{Diag{T}},dim::Int64) where {T<:AbstractVector} = Diag{T}(zeros(eltype(T),dim))
-Base.zeros(::Type{Diag{T}},dim::Int64) where {T<:Number} = Diag{T}(zero(T))
+Base.zeros(::Type{Diag{T}},dim::Int64) where
+  {T<:AbstractVector} = Diag{T}(zeros(eltype(T),dim))
+Base.zeros(::Type{Diag{T}},dim::Int64) where
+  {T<:Number} = Diag{T}(zero(T))
-#*(D::Diag{T},x::S) where {T<:AbstractVector,S<:Number} = Dense{promote_type(eltype(D),S)}(x*data(D))
-#*(x::Number,D::Diag) = D*x
+function Base.:*(D::Diag{<:Vector{El}},x::S) where {El<:Number,S<:Number}
+  return Diag{Vector{promote_type(El,S)}}(x*data(D))
+end
+
+function Base.:*(D::UniformDiag{<:El},x::S) where {El<:Number,S<:Number}
+  return Diag{promote_type(El,S)}(x*data(D))
+end
+
+Base.:*(x::Number,D::Diag) = D*x
#
# Type promotions involving Diag
@@ -111,35 +113,32 @@ contraction_output_type(TensorT1::Type{<:DenseTensor},
function contraction_output_type(TensorT1::Type{<:DiagTensor{<:Number,N1}},
                                 TensorT2::Type{<:DiagTensor{<:Number,N2}},
                                 indsR) where {N1,N2}
-  if ValLength(indsR)===Val(N1+N2) # Turn into is_outer(inds1,inds2,indsR) function? How
-                                   # does type inference work with arithmatic of compile time values?
+  if ValLength(indsR)===Val(N1+N2)
+    # Turn into is_outer(inds1,inds2,indsR) function?
+    # How does type inference work with arithmetic of compile time values?
    return similar_type(dense(promote_type(TensorT1,TensorT2)),indsR)
  end
  return similar_type(promote_type(TensorT1,TensorT2),indsR)
end
# TODO: move to tensor.jl?
-zero_contraction_output(TensorT1::Type{<:Tensor},TensorT2::Type{<:Tensor},indsR) = zeros(contraction_output_type(TensorT1,TensorT2,typeof(indsR)),indsR) +zero_contraction_output(TensorT1::Type{<:Tensor},TensorT2::Type{<:Tensor},indsR) = zeros(contraction_output_type(TensorT1,TensorT2,indsR),indsR) function contraction_output(TensorT1::Type{<:DiagTensor}, - TensorT2::Type{<:DiagTensor}, + TensorT2::Type{<:Tensor}, indsR) return zero_contraction_output(TensorT1,TensorT2,indsR) end +contraction_output(TensorT1::Type{<:Tensor},TensorT2::Type{<:DiagTensor},indsR) = contraction_output(TensorT2,TensorT1,indsR) + function contraction_output(TensorT1::Type{<:DiagTensor}, - TensorT2::Type{<:Tensor}, + TensorT2::Type{<:DiagTensor}, indsR) return zero_contraction_output(TensorT1,TensorT2,indsR) end -contraction_output(TensorT1::Type{<:Tensor},TensorT2::Type{<:DiagTensor},indsR) = contraction_output(TensorT2,TensorT1,indsR) function Base.Array(T::DiagTensor{ElT,N}) where {ElT,N} - A = zeros(ElT,dims(T)) - for i = 1:diag_length(T) - diag_inds = CartesianIndex{N}(ntuple(_->i,Val(N))) - A[diag_inds] = T[diag_inds] - end - return A + return Array(dense(T)) end Base.Matrix(T::DiagTensor{<:Number,2}) = Array(T) Base.Vector(T::DiagTensor{<:Number,1}) = Array(T) @@ -147,7 +146,12 @@ Base.Vector(T::DiagTensor{<:Number,1}) = Array(T) diag_length(T::DiagTensor) = minimum(dims(T)) diag_length(T::DiagTensor{<:Number,0}) = 1 -# Add a get_diagindex(T::DiagTensor,ind::Int) function to avoid checking the input +getdiag(T::DiagTensor,ind::Int) = store(T)[ind] + +setdiag!(T::DiagTensor,val,ind::Int) = (store(T)[ind] = val) + +setdiag(T::DiagTensor,val::ElR,ind::Int) where {ElR} = Tensor(Diag{ElR}(val),inds(T)) + function Base.getindex(T::DiagTensor{ElT,N},inds::Vararg{Int,N}) where {ElT,N} if all(==(inds[1]),inds) return store(T)[inds[1]] @@ -160,7 +164,8 @@ Base.getindex(T::DiagTensor{<:Number,0}) = store(T)[1] # Set diagonal elements # Throw error for off-diagonal -function Base.setindex!(T::DiagTensor{<:Number,N},val,inds::Vararg{Int,N}) where {N} +function Base.setindex!(T::DiagTensor{<:Number,N}, + val,inds::Vararg{Int,N}) where {N} all(==(inds[1]),inds) || error("Cannot set off-diagonal element of Diag storage") return store(T)[inds[1]] = val end @@ -171,196 +176,54 @@ function Base.setindex!(T::UniformDiagTensor{<:Number,N},val,inds::Vararg{Int,N} error("Cannot set elements of a uniform Diag storage") end -# convert to Dense -function dense(D::DiagTensor) - return Tensor(Dense{eltype(D)}(vec(Array(D))),inds(D)) +function dense(::Type{<:Tensor{ElT,N,StoreT,IndsT}}) where {ElT,N, + StoreT<:Diag,IndsT} + return Tensor{ElT,N,dense(StoreT),IndsT} end -function dense(::Type{<:DiagTensor{ElT,N,StoreT,IndsT}}) where {ElT,N,StoreT,IndsT} - return Tensor{ElT,N,dense(StoreT),IndsT} +# convert to Dense +function dense(T::TensorT) where {TensorT<:DiagTensor} + R = zeros(dense(TensorT),inds(T)) + for i = 1:diag_length(T) + setdiag!(R,getdiag(T,i),i) + end + return R end function outer!(R::DenseTensor{<:Number,NR}, T1::DiagTensor{<:Number,N1}, T2::DiagTensor{<:Number,N2}) where {NR,N1,N2} for i1 = 1:diag_length(T1), i2 = 1:diag_length(T2) - diag_inds1 = CartesianIndex{N1}(ntuple(_->i1,Val(N1))) - diag_inds2 = CartesianIndex{N2}(ntuple(_->i2,Val(N2))) indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? 
i1 : i2, Val(NR))) - R[indsR] = T1[diag_inds1]*T2[diag_inds2] + R[indsR] = getdiag(T1,i1)*getdiag(T2,i2) end return R end -# Right an in-place version -function outer(T1::DiagTensor{ElT1,N1},T2::DiagTensor{ElT2,N2}) where {ElT1,ElT2,N1,N2} - indsR = unioninds(inds(T1),inds(T2)) - R = Tensor(Dense{promote_type(ElT1,ElT2)}(zeros(dim(indsR))),indsR) - outer!(R,T1,T2) +# TODO: write an optimized version of this? +function outer!(R::DenseTensor{ElR}, + T1::DenseTensor, + T2::DiagTensor) where {ElR} + R .= zero(ElR) + outer!(R,T1,dense(T2)) return R end -# For now, we will just make them dense since the output is dense anyway -#function storage_outer(D1::Diag{T1},is1::IndexSet,D2::Diag{T2},is2::IndexSet) where {T1,T2} -# A1 = storage_dense(D1,is1) -# A2 = storage_dense(D2,is2) -# return storage_outer(A1,is1,A2,is2) -#end - -# Convert to an array by setting the diagonal elements -#function storage_convert(::Type{Array},D::Diag,is::IndexSet) -# A = zeros(eltype(D),dims(is)) -# for i = 1:minDim(is) -# A[fill(i,length(is))...] = D[i] -# end -# return A -#end - -#function storage_convert(::Type{Dense{T}},D::Diag,is::IndexSet) where T -# return Dense{T}(vec(storage_convert(Array,D,is))) -#end - -storage_fill!(D::Diag,x::Number) = fill!(data(D),x) - -#function diag_getindex(Tstore::Diag{<:AbstractVector}, -# val::Int) -# return getindex(data(Tstore),val) -#end - -# Uniform case -#function diag_getindex(Tstore::Diag{<:Number}, -# val::Int) -# return data(Tstore) -#end - -# Get diagonal elements -# Gives zero for off-diagonal elements -#function storage_getindex(Tstore::Diag{T}, -# Tis::IndexSet, -# vals::Union{Int, AbstractVector{Int}}...) where {T} -# if all(==(vals[1]),vals) -# return diag_getindex(Tstore,vals[1]) -# else -# return zero(eltype(T)) -# end -#end - -# Set diagonal elements -# Throw error for off-diagonal -#function storage_setindex!(Tstore::Diag{<:AbstractVector}, -# Tis::IndexSet, -# x::Union{<:Number, AbstractArray{<:Number}}, -# vals::Union{Int, AbstractVector{Int}}...) -# all(y->y==vals[1],vals) || error("Cannot set off-diagonal element of Diag storage") -# return setindex!(data(Tstore),x,vals[1]) -#end -# -#function storage_setindex!(Tstore::Diag{<:Number}, -# Tis::IndexSet, -# x::Union{<:Number, AbstractArray{<:Number}}, -# vals::Union{Int, AbstractVector{Int}}...) 
-# error("Cannot set elements of a uniform Diag storage") -#end - -# Add generic Diag's in-place -#function _add!(Bstore::Diag, -# Bis::IndexSet, -# Astore::Diag, -# Ais::IndexSet, -# x::Number = 1) -# # This is just used to check if the index sets -# # are permutations of each other, maybe -# # this should be at the ITensor level -# p = calculate_permutation(Bis,Ais) -# Adata = data(Astore) -# Bdata = data(Bstore) -# if x == 1 -# Bdata .+= Adata -# else -# Bdata .+= x .* Adata -# end -#end - -# Uniform Diag case -#function _add!(Bstore::Diag{BT}, -# Bis::IndexSet, -# Astore::Diag{AT}, -# Ais::IndexSet, -# x::Number = 1) where {BT<:Number,AT<:Number} -# # This is just used to check if the index sets -# # are permutations of each other, maybe -# # this should be at the ITensor level -# p = calculate_permutation(Bis,Ais) -# if x == 1 -# Bstore.data += data(Astore) -# else -# Bstore.data += x * data(Astore) -# end -#end - -#function _add!(Bstore::Dense, -# Bis::IndexSet, -# Astore::Diag, -# Ais::IndexSet, -# x::Number = 1) -# # This is just used to check if the index sets -# # are permutations of each other -# p = calculate_permutation(Bis,Ais) -# Bdata = data(Bstore) -# mindim = minDim(Bis) -# reshapeBdata = reshape(Bdata,dims(Bis)) -# if x == 1 -# for ii = 1:mindim -# # TODO: this should be optimized, maybe use -# # strides instead of reshape? -# reshapeBdata[fill(ii,order(Bis))...] += Astore[ii] -# end -# else -# for ii = 1:mindim -# reshapeBdata[fill(ii,order(Bis))...] += x*Astore[ii] -# end -# end -#end - -# TODO: implement this -# This should not require any permutation -function storage_copyto!(Bstore::Diag, - Bis::IndexSet, - Astore::Diag, - Ais::IndexSet, - x::Number = 1) - p = calculate_permutation(Bis,Ais) - Adata = data(Astore) - Bdata = data(Bstore) - if is_trivial_permutation(p) - if x == 1 - Bdata .= Adata - else - Bdata .= x .* Adata - end - else - reshapeBdata = reshape(Bdata,dims(Bis)) - reshapeAdata = reshape(Adata,dims(Ais)) - if x == 1 - _add!(reshapeBdata,reshapeAdata,p,(a,b)->b) - else - _add!(reshapeBdata,reshapeAdata,p,(a,b)->x*b) - end - end -end - -# TODO: Move to tensorstorage.jl? -function storage_mult!(Astore::Diag, - x::Number) - Adata = data(Astore) - rmul!(Adata, x) +function outer!(R::DenseTensor{ElR}, + T1::DiagTensor, + T2::DenseTensor) where {ElR} + R .= zero(ElR) + outer!(R,dense(T1),T2) + return R end -# TODO: Move to tensorstorage.jl? -function storage_mult(Astore::Diag{T}, - x::Number) where T - Bdata = x*data(Astore) - return Diag(Bdata) +# Right an in-place version +function outer(T1::DiagTensor{ElT1,N1}, + T2::DiagTensor{ElT2,N2}) where {ElT1,ElT2,N1,N2} + indsR = unioninds(inds(T1),inds(T2)) + R = Tensor(Dense{promote_type(ElT1,ElT2)}(zeros(dim(indsR))),indsR) + outer!(R,T1,T2) + return R end function Base.permutedims!(R::DiagTensor{<:Number,N}, @@ -368,8 +231,7 @@ function Base.permutedims!(R::DiagTensor{<:Number,N}, perm::NTuple{N,Int},f::Function=(r,t)->t) where {N} # TODO: check that inds(R)==permute(inds(T),perm)? 
for i=1:diag_length(R) - diag_inds = CartesianIndex{N}(ntuple(_->i,Val(N))) - @inbounds R[diag_inds] = f(R[diag_inds],T[diag_inds]) + @inbounds setdiag!(R,f(getdiag(R,i),getdiag(T,i)),i) end return R end @@ -384,8 +246,8 @@ end function Base.permutedims(T::UniformDiagTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=identity) where {ElR,ElT,N} - diag_inds = CartesianIndex(ntuple(_->1,Val(N))) - R = Tensor(Diag{promote_type(ElR,ElT)}(f(T[diag_inds])),permute(inds(T),perm)) + R = Tensor(Diag{promote_type(ElR,ElT)}(f(getdiag(T,1))), + permute(inds(T),perm)) return R end @@ -402,8 +264,7 @@ function permutedims!!(R::UniformDiagTensor{ElR,N}, T::UniformDiagTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=(r,t)->t) where {ElR,ElT,N} - diag_inds = CartesianIndex(ntuple(_->1,Val(N))) - R = Tensor(Diag{promote_type(ElR,ElT)}(f(R[diag_inds],T[diag_inds])),inds(R)) + R = Tensor(Diag{promote_type(ElR,ElT)}(f(getdiag(R,1),getdiag(T,1))),inds(R)) return R end @@ -412,8 +273,7 @@ function Base.permutedims!(R::DenseTensor{ElR,N}, perm::NTuple{N,Int}, f::Function = (r,t)->t) where {ElR,ElT,N} for i = 1:diag_length(T) - diag_inds = CartesianIndex(ntuple(_->i,Val(N))) - @inbounds R[diag_inds] = f(R[diag_inds],T[diag_inds]) + @inbounds setdiag!(R,f(getdiag(R,i),getdiag(T,i)),i) end return R end @@ -425,283 +285,64 @@ function permutedims!!(R::DenseTensor{ElR,N}, return R end +function _contract!!(R::UniformDiagTensor{ElR,NR},labelsR, + T1::UniformDiagTensor{<:Number,N1},labelsT1, + T2::UniformDiagTensor{<:Number,N2},labelsT2) where {ElR,NR,N1,N2} + if NR==0 # If all indices of A and B are contracted + # all indices are summed over, just add the product of the diagonal + # elements of A and B + R = setdiag(R,diag_length(T1)*getdiag(T1,1)*getdiag(T2,1),1) + else + # not all indices are summed over, set the diagonals of the result + # to the product of the diagonals of A and B + R = setdiag(R,getdiag(T1,1)*getdiag(T2,1),1) + end + return R +end + function _contract!(R::DiagTensor{ElR,NR},labelsR, T1::DiagTensor{<:Number,N1},labelsT1, T2::DiagTensor{<:Number,N2},labelsT2) where {ElR,NR,N1,N2} if NR==0 # If all indices of A and B are contracted # all indices are summed over, just add the product of the diagonal # elements of A and B - # Need to set to zero since - # Cdata is originally uninitialized memory - # (so causes random output) - - R[1] = zero(ElR) + Rdiag = zero(ElR) for i = 1:diag_length(T1) - diag_inds = CartesianIndex{N1}(ntuple(_->i,Val(N1))) - R[1] += T1[diag_inds]*T2[diag_inds] + Rdiag += getdiag(T1,i)*getdiag(T2,i) end + setdiag!(R,Rdiag,1) else min_dim = min(diag_length(T1),diag_length(T2)) # not all indices are summed over, set the diagonals of the result # to the product of the diagonals of A and B for i = 1:min_dim - diag_inds_R = CartesianIndex{NR}(ntuple(_->i,Val(NR))) - diag_inds_T1 = CartesianIndex{N1}(ntuple(_->i,Val(N1))) - diag_inds_T2 = CartesianIndex{N2}(ntuple(_->i,Val(N2))) - R[diag_inds_R] = T1[diag_inds_T1]*T2[diag_inds_T2] + setdiag!(R,getdiag(T1,i)*getdiag(T2,i),i) end end + return R end -# TODO: make this a special version of storage_add!() -# This shouldn't do anything to the data -#function storage_permute!(Bstore::Diag, -# Bis::IndexSet, -# Astore::Diag, -# Ais::IndexSet) -# p = calculate_permutation(Bis,Ais) -# Adata = data(Astore) -# Bdata = data(Bstore) -# if is_trivial_permutation(p) -# Bdata .= Adata -# else -# reshapeBdata = reshape(Bdata,dims(Bis)) -# permutedims!(reshapeBdata,reshape(Adata,dims(Ais)),p) -# end -#end - -# TODO: move this to tensorstorage.jl? 
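# A concrete reading of the Diag*Diag contraction rules above, for hypothetical
# order-2 diagonal tensors A and B with diagonals a and b:
#   - NR==0 (all indices contracted): the result is the scalar sum_i a[i]*b[i],
#     i.e. tr(A*B) for diagonal matrices
#   - otherwise the result stays diagonal, with getdiag(R,i) == a[i]*b[i]
# The uniform case produces the same numbers from a single stored value:
# diag_length(T1)*getdiag(T1,1)*getdiag(T2,1) is exactly the fully-contracted sum.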
-function storage_dag(Astore::Diag,Ais::IndexSet) - return dag(Ais),storage_conj(Astore) -end - -# TODO: move this to tensorstorage.jl? -function storage_scalar(D::Diag{T}) where {T<:AbstractVector} - length(D)==1 && return D[1] - throw(ErrorException("Cannot convert Diag -> Number for length of data greater than 1")) -end - -function storage_scalar(D::Diag{T}) where {T<:Number} - return data(T) -end - -# Contract function for Diag*Dense -#function _contract(Cinds::IndexSet, -# Clabels::Vector{Int}, -# Astore::Diag{Vector{SA}}, -# Ainds::IndexSet, -# Alabels::Vector{Int}, -# Bstore::Dense{SB}, -# Binds::IndexSet, -# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} -# SC = promote_type(SA,SB) -# -# # Convert the arrays to a common type -# # since we will call BLAS -# Astore = convert(Diag{Vector{SC}},Astore) -# Bstore = convert(Dense{SC},Bstore) -# -# Adims = dims(Ainds) -# Bdims = dims(Binds) -# Cdims = dims(Cinds) -# -# # Create storage for output tensor -# # This needs to be filled with zeros, since in general -# # the output of Diag*Dense will be sparse -# Cstore = Dense{SC}(zero(SC),prod(Cdims)) -# -# # This seems unnecessary, and makes this function -# # non-generic. I think Dense should already store -# # the properly-shaped data (and be parametrized by the -# # order, and maybe store the size?) -# Adata = data(Astore) -# Bdata = reshape(data(Bstore),Bdims) -# Cdata = reshape(data(Cstore),Cdims) -# -# # Functions to do the contraction -# if(length(Alabels)==0) -# _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) -# elseif(length(Blabels)==0) -# _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) -# else -# _contract_diag_dense!(Cdata,Clabels,Adata,Alabels,Bdata,Blabels) -# end -# -# return Cstore -#end - -# Contract function for Diag uniform * Dense -#function _contract(Cinds::IndexSet, -# Clabels::Vector{Int}, -# Astore::Diag{SA}, -# Ainds::IndexSet, -# Alabels::Vector{Int}, -# Bstore::Dense{SB}, -# Binds::IndexSet, -# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} -# SC = promote_type(SA,SB) -# -# # Convert the arrays to a common type -# # since we will call BLAS -# #Astore = convert(Diag{SC},Astore) -# #Bstore = convert(Dense{SC},Bstore) -# -# Bdims = dims(Binds) -# Cdims = dims(Cinds) -# -# # Create storage for output tensor -# # This needs to be filled with zeros, since in general -# # the output of Diag*Dense will be sparse -# Cstore = Dense{SC}(zero(SC),prod(Cdims)) -# -# # This seems unnecessary, and makes this function -# # non-generic. I think Dense should already store -# # the properly-shaped data (and be parametrized by the -# # order, and maybe store the size?) 
-# Adata = data(Astore) -# Bdata = reshape(data(Bstore),Bdims) -# Cdata = reshape(data(Cstore),Cdims) -# -# # Functions to do the contraction -# if(length(Alabels)==0) -# _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) -# elseif(length(Blabels)==0) -# _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) -# elseif(length(Alabels)==2 && length(Clabels)==length(Blabels)) -# # This is just a replacement of the indices -# # TODO: This logic should be higher up -# for i = 1:length(Clabels) -# if Blabels[i] > 0 -# Cinds[i] = Binds[i] -# else -# if Alabels[1] > 0 -# Cinds[i] = Ainds[1] -# else -# Cinds[i] = Ainds[2] -# end -# end -# end -# Cdata .= Adata .* Bdata -# else -# _contract_diag_dense!(Cstore,Cinds,Clabels, -# Astore,Ainds,Alabels, -# Bstore,Binds,Blabels) -# end -# -# return Cstore -#end - -# Contract function for Dense*Diag (calls Diag*Dense) -#function _contract(Cinds::IndexSet, -# Clabels::Vector{Int}, -# Astore::Dense, -# Ainds::IndexSet, -# Alabels::Vector{Int}, -# Bstore::Diag, -# Binds::IndexSet, -# Blabels::Vector{Int}) -# return _contract(Cinds,Clabels, -# Bstore,Binds,Blabels, -# Astore,Ainds,Alabels) -#end - -# Contract function for Diag*Diag -#function _contract(Cinds::IndexSet, -# Clabels::Vector{Int}, -# Astore::Diag{Vector{SA}}, -# Ainds::IndexSet, -# Alabels::Vector{Int}, -# Bstore::Diag{Vector{SB}}, -# Binds::IndexSet, -# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} -# SC = promote_type(SA,SB) -# -# # Convert the arrays to a common type -# # since we will call BLAS -# Astore = convert(Diag{Vector{SC}},Astore) -# Bstore = convert(Diag{Vector{SC}},Bstore) -# Cstore = Diag{Vector{SC}}(minDim(Cinds)) -# -# Adata = data(Astore) -# Bdata = data(Bstore) -# Cdata = data(Cstore) -# -# if(length(Alabels)==0) -# _contract_scalar!(Cdata,Clabels,Bdata,Blabels,Adata[1]) -# elseif(length(Blabels)==0) -# _contract_scalar!(Cdata,Clabels,Adata,Alabels,Bdata[1]) -# else -# _contract_diag_diag!(Cdata,Clabels,Adata,Alabels,Bdata,Blabels) -# end -# -# return Cstore -#end - -# Contract function for Diag uniform * Diag uniform -#function _contract(Cinds::IndexSet, -# Clabels::Vector{Int}, -# Astore::Diag{SA}, -# Ainds::IndexSet, -# Alabels::Vector{Int}, -# Bstore::Diag{SB}, -# Binds::IndexSet, -# Blabels::Vector{Int}) where {SA<:Number,SB<:Number} -# SC = promote_type(SA,SB) -# -# # Convert the arrays to a common type -# # since we will call BLAS -# Astore = convert(Diag{SC},Astore) -# Bstore = convert(Diag{SC},Bstore) -# Cstore = Diag{SC}(minDim(Cinds)) -# -# Adata = data(Astore) -# Bdata = data(Bstore) -# Cdata = data(Cstore) -# -# if length(Clabels) == 0 # If all indices of A and B are contracted -# # all indices are summed over, just add the product of the diagonal -# # elements of A and B -# min_dim = minimum(dims(Ainds)) # == length(Bdata) -# # Need to set to zero since -# # Cdata is originally uninitialized memory -# # (so causes random output) -# Cdata = zero(SC) -# for i = 1:min_dim -# Cdata += Adata*Bdata -# end -# else -# # not all indices are summed over, set the diagonals of the result -# # to the product of the diagonals of A and B -# Cdata = Adata*Bdata -# end -# -# Cstore.data = Cdata -# return Cstore -#end - function _contract!(C::DenseTensor{ElC,NC},Clabels, A::DiagTensor{ElA,NA},Alabels, - B::DenseTensor{ElB,NB},Blabels) where {ElA,NA,ElB,NB,ElC,NC} - if all(i -> i < 0, Blabels) # If all of B is contracted - # Can rewrite NC+NB==NA + B::DenseTensor{ElB,NB},Blabels) where {ElA,NA, + ElB,NB, + ElC,NC} + if all(i -> i < 0, Blabels) + # If all of B is 
contracted + # TODO: can also check NC+NB==NA min_dim = minimum(dims(B)) if length(Clabels) == 0 # all indices are summed over, just add the product of the diagonal # elements of A and B for i = 1:min_dim - diag_inds_A = CartesianIndex{NA}(ntuple(_->i,Val(NA))) - diag_inds_B = CartesianIndex{NB}(ntuple(_->i,Val(NB))) - C[1] += A[diag_inds_A]*B[diag_inds_B] + setdiag!(C,getdiag(C,1)+getdiag(A,i)*getdiag(B,i),1) end else # not all indices are summed over, set the diagonals of the result # to the product of the diagonals of A and B # TODO: should we make this return a Diag storage? for i = 1:min_dim - diag_inds_A = CartesianIndex{NA}(ntuple(_->i,Val(NA))) - diag_inds_B = CartesianIndex{NB}(ntuple(_->i,Val(NB))) - diag_inds_C = CartesianIndex{NC}(ntuple(_->i,Val(NC))) - C[diag_inds_C] = A[diag_inds_A]*B[diag_inds_B] + setdiag!(C,getdiag(A,i)*getdiag(B,i),i) end end else @@ -712,7 +353,7 @@ function _contract!(C::DenseTensor{ElC,NC},Clabels, nbu = 0 for ib = 1:length(Blabels) ia = findfirst(==(Blabels[ib]),Alabels) - if(!isnothing(ia)) + if !isnothing(ia) b_cstride += stride(B,ib) bstart += astarts[ia]*stride(B,ib) else @@ -723,7 +364,7 @@ function _contract!(C::DenseTensor{ElC,NC},Clabels, c_cstride = 0 for ic = 1:length(Clabels) ia = findfirst(==(Clabels[ic]),Alabels) - if(!isnothing(ia)) + if !isnothing(ia) c_cstride += stride(C,ic) cstart += astarts[ia]*stride(C,ic) end @@ -759,205 +400,15 @@ function _contract!(C::DenseTensor{ElC,NC},Clabels, coffset += ii*custride[i] end for j in 1:diag_length(A) - diag_inds_A = CartesianIndex{NA}(ntuple(_->j,Val(NA))) - C[cstart+j*c_cstride+coffset] += A[diag_inds_A]*B[bstart+j*b_cstride+boffset] - end - end - end -end -_contract!(C::DenseTensor,Clabels,A::DenseTensor,Alabels,B::DiagTensor,Blabels) = _contract!(C,Clabels,B,Blabels,A,Alabels) - -#function _contract_diag_dense!(Cdata::Array{T,NC},Clabels::Vector{Int}, -# Adata::Vector{T},Alabels::Vector{Int}, -# Bdata::Array{T,NB},Blabels::Vector{Int}) where {T,NB,NC} -# if all(i -> i < 0, Blabels) # If all of B is contracted -# min_dim = minimum(size(Bdata)) -# if length(Clabels) == 0 -# # all indices are summed over, just add the product of the diagonal -# # elements of A and B -# for i = 1:min_dim -# Cdata[1] += Adata[i]*Bdata[ntuple(_->i,Val(NB))...] -# end -# else -# # not all indices are summed over, set the diagonals of the result -# # to the product of the diagonals of A and B -# # TODO: should we make this return a Diag storage? -# for i = 1:min_dim -# Cdata[ntuple(_->i,Val(NC))...] = Adata[i]*Bdata[ntuple(_->i,Val(NB))...] 
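# Sketch of the stride bookkeeping in the kernels above (both the live
# _contract! and the deleted storage-level version): every label of B that
# also appears on A is contracted, and the strides of those dimensions are
# summed into one step size, so a single counter walks all contracted
# dimensions of B in lockstep. Names here are illustrative re-statements over
# plain Arrays:
function combined_cstride(B::AbstractArray, Blabels, Alabels)
    b_cstride = 0
    for ib in 1:length(Blabels)
        ia = findfirst(==(Blabels[ib]), Alabels)
        if !isnothing(ia)
            b_cstride += strides(B)[ib]
        end
    end
    return b_cstride
end

B = zeros(2, 3, 2)                          # strides (1, 2, 6)
combined_cstride(B, (-1, 2, -2), (-1, -2))  # dims 1 and 3 contracted: 1 + 6 = 7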
-# end -# end -# else -# astarts = zeros(Int,length(Alabels)) -# bstart = 0 -# cstart = 0 -# b_cstride = 0 -# nbu = 0 -# for ib = 1:length(Blabels) -# ia = findfirst(==(Blabels[ib]),Alabels) -# if(!isnothing(ia)) -# b_cstride += stride(Bdata,ib) -# bstart += astarts[ia]*stride(Bdata,ib) -# else -# nbu += 1 -# end -# end -# -# c_cstride = 0 -# for ic = 1:length(Clabels) -# ia = findfirst(==(Clabels[ic]),Alabels) -# if(!isnothing(ia)) -# c_cstride += stride(Cdata,ic) -# cstart += astarts[ia]*stride(Cdata,ic) -# end -# end -# -# # strides of the uncontracted dimensions of -# # Bdata -# bustride = zeros(Int,nbu) -# custride = zeros(Int,nbu) -# # size of the uncontracted dimensions of -# # Bdata, to be used in CartesianIndices -# busize = zeros(Int,nbu) -# n = 1 -# for ib = 1:length(Blabels) -# if Blabels[ib] > 0 -# bustride[n] = stride(Bdata,ib) -# busize[n] = size(Bdata,ib) -# ic = findfirst(==(Blabels[ib]),Clabels) -# custride[n] = stride(Cdata,ic) -# n += 1 -# end -# end -# -# boffset_orig = 1-sum(strides(Bdata)) -# coffset_orig = 1-sum(strides(Cdata)) -# cartesian_inds = CartesianIndices(Tuple(busize)) -# for inds in cartesian_inds -# boffset = boffset_orig -# coffset = coffset_orig -# for i in 1:nbu -# ii = inds[i] -# boffset += ii*bustride[i] -# coffset += ii*custride[i] -# end -# for j in 1:length(Adata) -# Cdata[cstart+j*c_cstride+coffset] += Adata[j]*Bdata[bstart+j*b_cstride+boffset] -# end -# end -# end -#end - -function _contract_diag_dense!(Cstore::Dense,Cinds,Clabels::Vector{Int}, - Astore::Diag{TA},Ainds,Alabels::Vector{Int}, - Bstore::Dense,Binds,Blabels::Vector{Int}) where {TA<:Number} - if all(i -> i < 0, Blabels) # If all of B is contracted - Bdims = dims(Binds) - min_dim = minimum(Bdims) - rB = reshape(data(Bstore),Bdims) - if length(Clabels) == 0 - # all indices are summed over, just add the product of the diagonal - # elements of A and B - # TODO: replace this with manual strides - for i = 1:min_dim - Cstore[1] += Astore[i]*rB[ntuple(_->i,length(Binds))...] - end - else - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - # TODO: should we make this return a Diag storage? - for i = 1:min_dim - Cstore[ntuple(_->i,length(Cinds))...] = Astore[i]*rB[ntuple(_->i,length(Binds))...] 
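# Sketch of the offset iteration used throughout these kernels: the
# uncontracted dimensions are walked with CartesianIndices and each index is
# folded into a linear offset through the stored strides, with the
# 1 - sum(strides) origin compensating for 1-based indexing. Self-contained
# re-statement:
function linear_offsets(busize::NTuple{N,Int}, bustride::NTuple{N,Int}) where {N}
    offsets = Int[]
    origin = 1 - sum(bustride)
    for I in CartesianIndices(busize)
        off = origin
        for k in 1:N
            off += I[k] * bustride[k]
        end
        push!(offsets, off)
    end
    return offsets
end

linear_offsets((2, 2), (1, 2))  # == [1, 2, 3, 4], column-major order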
- end - end - else - astarts = zeros(Int,length(Alabels)) - bstart = 0 - cstart = 0 - b_cstride = 0 - nbu = 0 - for ib = 1:length(Blabels) - ia = findfirst(==(Blabels[ib]),Alabels) - if(!isnothing(ia)) - b_cstride += stride(Binds,ib) - bstart += astarts[ia]*stride(Binds,ib) - else - nbu += 1 - end - end - - c_cstride = 0 - for ic = 1:length(Clabels) - ia = findfirst(==(Clabels[ic]),Alabels) - if(!isnothing(ia)) - c_cstride += stride(Cinds,ic) - cstart += astarts[ia]*stride(Cinds,ic) - end - end - - # strides of the uncontracted dimensions of - # Bdata - bustride = zeros(Int,nbu) - custride = zeros(Int,nbu) - # size of the uncontracted dimensions of - # Bdata, to be used in CartesianIndices - busize = zeros(Int,nbu) - custride = zeros(Int,nbu) - # size of the uncontracted dimensions of - # Bdata, to be used in CartesianIndices - busize = zeros(Int,nbu) - n = 1 - for ib = 1:length(Blabels) - if Blabels[ib] > 0 - bustride[n] = stride(Binds,ib) - busize[n] = dim(Binds,ib) #size(Bstore,ib) - ic = findfirst(==(Blabels[ib]),Clabels) - custride[n] = stride(Cinds,ic) - n += 1 - end - end - - min_dim = minimum(dims(Binds)) - boffset_orig = 1-sum(strides(Binds)) - coffset_orig = 1-sum(strides(Cinds)) - cartesian_inds = CartesianIndices(Tuple(busize)) - for inds in cartesian_inds - boffset = boffset_orig - coffset = coffset_orig - for i in 1:nbu - ii = inds[i] - boffset += ii*bustride[i] - coffset += ii*custride[i] - end - for j in 1:min_dim - Cstore[cstart+j*c_cstride+coffset] += Astore[j]*Bstore[bstart+j*b_cstride+boffset] + C[cstart+j*c_cstride+coffset] += getdiag(A,j)* + B[bstart+j*b_cstride+boffset] end end end end - - -# Maybe this works for uniform storage as well -#function _contract_diag_diag!(Cdata::Vector{T},Clabels::Vector{Int}, -# Adata::Vector{T},Alabels::Vector{Int}, -# Bdata::Vector{T},Blabels::Vector{Int}) where T -# if length(Clabels) == 0 # If all indices of A and B are contracted -# # all indices are summed over, just add the product of the diagonal -# # elements of A and B -# Adim = length(Adata) # == length(Bdata) -# # Need to set to zero since -# # Cdata is originally uninitialized memory -# # (so causes random output) -# Cdata[1] = zero(T) -# for i = 1:Adim -# Cdata[1] += Adata[i]*Bdata[i] -# end -# else -# min_dim = min(length(Adata),length(Bdata)) -# # not all indices are summed over, set the diagonals of the result -# # to the product of the diagonals of A and B -# for i = 1:min_dim -# Cdata[i] = Adata[i]*Bdata[i] -# end -# end -#end +_contract!(C::DenseTensor,Clabels, + A::DenseTensor,Alabels, + B::DiagTensor,Blabels) = _contract!(C,Clabels, + B,Blabels, + A,Alabels) diff --git a/test/runtests.jl b/test/runtests.jl index 2332e457fd..038e880471 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,9 +6,9 @@ using ITensors, Test "test_smallstring.jl", "test_index.jl", "test_indexset.jl", - "test_itensor.jl", + "test_itensor_dense.jl", + "test_itensor_diag.jl", "test_contract.jl", - "test_diagITensor.jl", "test_combiner.jl", "test_trg.jl", "test_ctmrg.jl", diff --git a/test/test_combiner.jl b/test/test_combiner.jl index 42e78cf357..0979d4ca00 100644 --- a/test/test_combiner.jl +++ b/test/test_combiner.jl @@ -10,55 +10,55 @@ A = randomITensor(i, j, k, l) @testset "Two index combiner" begin for inds_ij ∈ permutations([i,j]) - C = combiner(inds_ij...) + C,c = combiner(inds_ij...) 
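# Hedged sketch of the calling convention these tests switch to: `combiner`
# now returns both the combiner ITensor and the combined index, instead of a
# separate `combinedindex` query afterwards; contracting a second time
# uncombines. Assumes the ITensors API as it stands at this point in the
# series, with fresh variable names to stay clear of the test's own:
using ITensors
i0, j0 = Index(2, "i0"), Index(3, "j0")
A0 = randomITensor(i0, j0)
C0, c0 = combiner(i0, j0)   # combiner tensor plus its combined index
B0 = A0 * C0                # B0 carries c0 in place of i0 and j0
D0 = B0 * C0                # contracting again uncombines: D0 ≈ A0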
B = A*C @test hasinds(B, l, k) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) end for inds_il ∈ permutations([i,l]) - C = combiner(inds_il...) + C,c = combiner(inds_il...) B = A*C @test hasinds(B, j, k) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) end for inds_ik ∈ permutations([i,k]) - C = combiner(inds_ik...) + C,c = combiner(inds_ik...) B = A*C @test hasinds(B, j, l) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) end for inds_jk ∈ permutations([j,k]) - C = combiner(inds_jk...) + C,c = combiner(inds_jk...) B = A*C @test hasinds(B, i, l) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) end for inds_jl ∈ permutations([j,l]) - C = combiner(inds_jl...) + C,c = combiner(inds_jl...) B = A*C @test hasinds(B, i, k) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) end for inds_kl ∈ permutations([k,l]) - C = combiner(inds_kl...) + C,c = combiner(inds_kl...) B = A*C @test hasinds(B, i, j) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) @@ -67,28 +67,28 @@ end @testset "Three index combiner" begin for inds_ijl ∈ permutations([i,j,l]) - C = combiner(inds_ijl...) + C,c = combiner(inds_ijl...) B = A*C @test hasindex(B, k) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) end for inds_ijk ∈ permutations([i,j,k]) - C = combiner(inds_ijk...) + C,c = combiner(inds_ijk...) B = A*C @test hasindex(B, l) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) end for inds_jkl ∈ permutations([j,k,l]) - C = combiner(inds_jkl...) + C,c = combiner(inds_jkl...) 
B = A*C @test hasindex(B, i) - @test combinedindex(C) == commonindex(B, C) + @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) @test Array(permute(D, i, j, k, l)) == Array(A) diff --git a/test/test_ctmrg.jl b/test/test_ctmrg.jl index aa6a2ab3a1..9f729f32e4 100644 --- a/test/test_ctmrg.jl +++ b/test/test_ctmrg.jl @@ -11,27 +11,38 @@ function ctmrg(T::ITensor, Clu = addtags(Clu,"orig","link") Al = addtags(Al,"orig","link") for i = 1:nsteps + ## Get the grown corner transfer matrix (CTM) - Au = replacetags(replacetags(replacetags(Al,"down","left","link"),"up","right","link"),"left","up","site") + Au = replacetags(Al,"down,link","left,link") + Au = replacetags(Au,"up,link","right,link") + Au = replacetags(Au,"left,site","up,site") + Clu⁽¹⁾ = Clu*Al*Au*T - + ## Diagonalize the grown CTM - ld = findindex(Clu⁽¹⁾, "link,down") - sd = findindex(Clu⁽¹⁾, "site,down") - lr = findindex(Clu⁽¹⁾, "link,right") - sr = findindex(Clu⁽¹⁾, "site,right") - Ud,Cdr = eigen(Clu⁽¹⁾, (ld,sd), (lr,sr); - maxdim=χmax, - lefttags="link,down,renorm", - righttags="link,right,renorm") + ld = findindex(Clu⁽¹⁾,"link,down") + sd = findindex(Clu⁽¹⁾,"site,down") + lr = findindex(Clu⁽¹⁾,"link,right") + sr = findindex(Clu⁽¹⁾,"site,right") + + Ud,Cdr = eigenHermitian(Clu⁽¹⁾, (ld,sd), (lr,sr); + maxdim=χmax, + lefttags="link,down,renorm", + righttags="link,right,renorm", + truncate=true) ## The renormalized CTM is the diagonal matrix of eigenvalues - Clu = replacetags(replacetags(replacetags(Cdr,"renorm","orig"),"down","up"),"right","left") + Clu = replacetags(Cdr,"renorm","orig") + Clu = replacetags(Clu,"down","up") + Clu = replacetags(Clu,"right","left") Clu = Clu/norm(Clu) ## Calculate the renormalized half row transfer matrix (HRTM) - Al = Al*replacetags(Ud,"down","up")*T*Ud - Al = replacetags(replacetags(Al,"renorm","orig"),"right","left","site") + Uu = replacetags(Ud,"down","up") + + Al = Al*Uu*T*Ud + Al = replacetags(Al,"renorm","orig") + Al = replacetags(Al,"right,site","left,site") Al = Al/norm(Al) end Clu = removetags(Clu,"orig") diff --git a/test/test_indexset.jl b/test/test_indexset.jl index 68ae3840d2..58000ab93a 100644 --- a/test/test_indexset.jl +++ b/test/test_indexset.jl @@ -43,24 +43,24 @@ using ITensors, I3 = IndexSet(j,l) @test hassameinds(I1,(k,j,i)) @test uniqueindex(I1,(I2,I3)) == i - @test uniqueindex(I1,IndexSet(k, j, i)) == Index() + @test uniqueindex(I1,IndexSet(k, j, i)) === nothing @test uniqueinds(I1,I2) == IndexSet(i,j) @test setdiff(I1,I2) == IndexSet(i,j) @test hassameinds(uniqueinds(I1,I2),(j,i)) @test commoninds(I1,I2) == IndexSet(k) @test commonindex(I1,I2) == k - @test commonindex(I1,IndexSet(l)) == Index() + @test commonindex(I1,IndexSet(l)) === nothing @test commoninds(I1,(j,l)) == IndexSet(j) @test commonindex(I1,(j,l)) == j @test commoninds(I1,(j,k)) == IndexSet(j,k) @test hassameinds(commoninds(I1,(j,k,l)),(j,k)) @test findinds(I1,"i") == IndexSet(i) @test findindex(I1,"j") == j - @test findindex(I1,"l") == Index() - @test findindex(I1,i) == 1 - @test findindex(I1,j) == 2 - @test findindex(I1,k) == 3 - @test findindex(I1,Index(2)) == 0 + @test findindex(I1,"l") === nothing + @test indexposition(I1,i) == 1 + @test indexposition(I1,j) == 2 + @test indexposition(I1,k) == 3 + @test indexposition(I1,Index(2)) === nothing end @testset "commoninds index ordering" begin I = IndexSet(i,k,j) @@ -111,15 +111,15 @@ using ITensors, @test swapprime(I,0,1) == IndexSet(i',j') @test swapprime(I,0,4) == IndexSet(i^4,j^4) I = IndexSet(i,j'') - @test swapprime(I,2,0) == IndexSet(i,j) + @test 
swapprime(I,2,0) == IndexSet(i'',j) I = IndexSet(i,j'',k,l) - @test swapprime(I,2,0) == IndexSet(i,j,k,l) + @test swapprime(I,2,0) == IndexSet(i'',j,k'',l'') I = IndexSet(i,k'',j'') @test swapprime(I,2,1) == IndexSet(i,k',j') # In-place version: I = IndexSet(i,k'',j''') swapprime!(I,2,0) - @test I == IndexSet(i,k,j''') + @test I == IndexSet(i'',k,j''') # With tags specified: I = IndexSet(i,k,j) @test swapprime(I,0,1,"i") == IndexSet(i',k,j) From 0f2fc27a37278a595d1aa45c862542b7a7c50af6 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Thu, 19 Sep 2019 17:24:43 -0400 Subject: [PATCH 14/27] Get combiner tests passing. Some MPS, MPO and DMRG tests are still failing --- src/decomp.jl | 18 +++++++------ src/indexset.jl | 2 ++ src/itensor.jl | 12 +++++---- src/mps/mpo.jl | 25 +++++++++--------- src/mps/mps.jl | 2 +- src/tensor/combiner.jl | 60 ++++++++++++++++++++++++++++++------------ src/tensor/dense.jl | 5 +++- src/tensor/tensor.jl | 20 ++++++++++++++ test/test_combiner.jl | 31 +++++++++++++--------- test/test_trg.jl | 16 +++++++---- 10 files changed, 129 insertions(+), 62 deletions(-) diff --git a/src/decomp.jl b/src/decomp.jl index 9fc8b18ae4..aa8fc37dd1 100644 --- a/src/decomp.jl +++ b/src/decomp.jl @@ -69,14 +69,16 @@ function svd(A::ITensor, Lpos,Rpos = getperms(inds(A),Lis,Ris) UT,ST,VT = svd(Tensor(A),Lpos,Rpos;kwargs...) U,S,V = ITensor(UT),ITensor(ST),ITensor(VT) - u = commonindex(U,S) - v = commonindex(S,V) - settags!(U,utags,u) - settags!(S,utags,u) - settags!(S,vtags,v) - settags!(V,vtags,v) - u = settags(u,utags) - v = settags(v,vtags) + u₀ = commonindex(U,S) + v₀ = commonindex(S,V) + + u = settags(u₀,utags) + v = settags(u₀,vtags) + + U *= δ(dag(u₀),u) + S = δ(dag(u₀),u)*S*δ(dag(v₀),v) + V *= δ(dag(v₀),v) + return U,S,V,u,v end diff --git a/src/indexset.jl b/src/indexset.jl index ee0403a78d..bfe6441a8c 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -511,6 +511,8 @@ function compute_contraction_labels(Ai::IndexSet{N1}, return (NTuple{N1,Int}(Aind),NTuple{N2,Int}(Bind)) end +# Move this to tensor, since this logic is different +# for contracting different kinds of storage function contract_inds(Ais::IndexSet{N1}, Aind::NTuple{N1,Int}, Bis::IndexSet{N2}, diff --git a/src/itensor.jl b/src/itensor.jl index 858fbd6f9b..d0bf654f58 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -268,16 +268,18 @@ end Base.getindex(T::ITensor{N},vals::Vararg{Int,N}) where {N} = Tensor(T)[vals...] -function Base.getindex(T::ITensor{N},ivs::Vararg{IndexVal,N}) where {N} +function Base.getindex(T::ITensor{N}, + ivs::Vararg{IndexVal,N}) where {N} p = getperm(inds(T),ivs) vals = permute(val.(ivs),p) return T[vals...] end -# TODO: what is this doing? -#function getindex(T::ITensor,ivs::Union{IndexVal, AbstractVector{IndexVal}}...) -# p = getperm(inds(T),map(x->x isa IndexVal ? x : x[1], ivs)) -# vals = map(x->x isa IndexVal ? val(x) : val.(x), ivs[p]) +# TODO: we should figure out if this is how we want to do +# slicing +#function getindex(T::ITensor,ivs::AbstractVector{IndexVal}...) +# p = getperm(inds(T),map(x->x[1], ivs)) +# vals = map(x->val.(x), ivs[[p...]]) # return Tensor(store(T),inds(T))[vals...] 
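# Sketch of the index-replacement idiom the svd rewrite above switches to:
# rather than retagging indices in place with settags!, contract with a delta
# (identity) tensor to trade an index for a retagged copy. Assumes `δ`,
# `settags`, and `dag` as used elsewhere in this series:
using ITensors
u0 = Index(3, "Link,u0")
u1 = settags(u0, "Link,u,svd")
U0 = randomITensor(Index(2, "site"), u0)
U1 = U0 * δ(dag(u0), u1)   # U1 carries u1 in place of u0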
#end diff --git a/src/mps/mpo.jl b/src/mps/mpo.jl index ce5543bc16..ebb26a9d70 100644 --- a/src/mps/mpo.jl +++ b/src/mps/mpo.jl @@ -14,7 +14,7 @@ mutable struct MPO MPO() = new(0,Vector{ITensor}(), 0, 0) - function MPO(N::Int, A::Vector{ITensor}, llim::Int=0, rlim::Int=N+1) + function MPO(N::Int, A::Vector{<:ITensor}, llim::Int=0, rlim::Int=N+1) new(N, A, llim, rlim) end @@ -43,18 +43,25 @@ mutable struct MPO links = Vector{Index}(undef, N) @inbounds for ii ∈ eachindex(sites) si = sites[ii] + dimi = dim(si) spin_op = op(sites, ops[ii], ii) links[ii] = Index(1, "Link,n=$ii") local this_it if ii == 1 this_it = ITensor(links[ii], si, si') - this_it[links[ii](1), si[:], si'[:]] = spin_op[si[:], si'[:]] + for i = 1:dimi, j = 1:dimi + this_it[links[ii](1), si(i), si'(j)] = spin_op[si(i), si'(j)] + end elseif ii == N this_it = ITensor(links[ii-1], si, si') - this_it[links[ii-1](1), si[:], si'[:]] = spin_op[si[:], si'[:]] + for i = 1:dimi, j = 1:dimi + this_it[links[ii-1](1), si(i), si'(j)] = spin_op[si(i), si'(j)] + end else this_it = ITensor(links[ii-1], links[ii], si, si') - this_it[links[ii-1](1), links[ii](1), si[:], si'[:]] = spin_op[si[:], si'[:]] + for i = 1:dimi, j = 1:dimi + this_it[links[ii-1](1), links[ii](1), si(i), si'(j)] = spin_op[si(i), si'(j)] + end end its[ii] = this_it end @@ -123,13 +130,7 @@ function siteindex(A::MPO,x::MPS,j::Integer) return si end -function siteinds(A::MPO,x::MPS) - is = IndexSet(length(A)) - @inbounds for j in eachindex(A) - is[j] = siteindex(A,x,j) - end - return is -end +siteinds(A::MPO,x::MPS) = [siteindex(A,x,j) for j ∈ 1:length(A)] """ dag(m::MPS) @@ -198,7 +199,7 @@ function linkindex(M::MPO,j::Integer) N = length(M) j ≥ length(M) && error("No link index to the right of site $j (length of MPO is $N)") li = commonindex(M[j],M[j+1]) - if isdefault(li) + if isnothing(li) error("linkindex: no MPO link index at link $j") end return li diff --git a/src/mps/mps.jl b/src/mps/mps.jl index 899d79ab4a..de045b6891 100644 --- a/src/mps/mps.jl +++ b/src/mps/mps.jl @@ -24,7 +24,7 @@ mutable struct MPS MPS(N::Int) = MPS(N,Vector{ITensor}(undef,N),0,N+1) function MPS(N::Int, - A::Vector{ITensor}, + A::Vector{<:ITensor}, llim::Int=0, rlim::Int=N+1) new(N,A,llim,rlim) diff --git a/src/tensor/combiner.jl b/src/tensor/combiner.jl index 157346d625..b440836858 100644 --- a/src/tensor/combiner.jl +++ b/src/tensor/combiner.jl @@ -3,7 +3,8 @@ export Combiner struct Combiner <: TensorStorage end -Base.eltype(::Combiner) = Nothing +Base.eltype(::Type{<:Combiner}) = Nothing +Base.eltype(::StoreT) where {StoreT<:Combiner} = eltype(StoreT) Base.promote_rule(::Type{<:Combiner},StorageT::Type{<:Dense}) = StorageT @@ -53,34 +54,59 @@ function count_common(labelsT1,labelsT2) return count end +function intersect_positions(labelsT1,labelsT2) + for i1 = 1:length(labelsT1) + for i2 = 1:length(labelsT2) + if labelsT1[i1] == labelsT2[i2] + return i1,i2 + end + end + end + return nothing +end + +function is_replacement(labelsT1,labelsT2) + return count_unique(labelsT1,labelsT2) == 1 && + count_common(labelsT1,labelsT2) == 1 +end + +function is_combiner(labelsT1,labelsT2) + return count_unique(labelsT1,labelsT2) == 1 && + count_common(labelsT1,labelsT2) > 1 +end + +function is_uncombiner(labelsT1,labelsT2) + return count_unique(labelsT1,labelsT2) > 1 && + count_common(labelsT1,labelsT2) == 1 +end + function contract!!(R::Tensor{<:Number,NR}, labelsR::NTuple{NR}, T1::CombinerTensor{<:Any,N1}, labelsT1::NTuple{N1}, T2::Tensor{<:Number,N2}, labelsT2::NTuple{N2}) where {NR,N1,N2} - @show labelsT1 
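# Sketch of the label-counting classification added above, restated in a
# self-contained form over tuples of Int labels (negative = contracted, as in
# the surrounding code): one unique plus one common label is plain index
# replacement, one unique plus several common is combining, several unique
# plus one common is uncombining.
count_unique(l1, l2) = count(x -> x ∉ l2, l1)
count_common(l1, l2) = count(x -> x ∈ l2, l1)

is_replacement(l1, l2) = count_unique(l1, l2) == 1 && count_common(l1, l2) == 1
is_combiner(l1, l2)    = count_unique(l1, l2) == 1 && count_common(l1, l2) > 1
is_uncombiner(l1, l2)  = count_unique(l1, l2) > 1  && count_common(l1, l2) == 1

is_combiner((10, -1, -2), (-1, -2, 3))   # true: only the combined label 10 is new
is_uncombiner((10, -1, -2), (10, 3, 4))  # true: 10 is consumed, -1 and -2 emerge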
- @show labelsT2 - @show count_unique(labelsT1,labelsT2) - @show count_common(labelsT1,labelsT2) - if N1 ≤ 1 println("identity") return R elseif N1 + N2 == NR error("Cannot perform outer product involving a combiner") - elseif N1 == 2 && count_unique(labelsT1,labelsT2) == 1 && - count_common(labelsT1,labelsT2) == 1 - # This is the case of index replacement - println("index replacement") - elseif count_unique(labelsT1,labelsT2) > 1 && - count_common(labelsT1,labelsT2) == 1 - # This is the case of uncombining - println("uncombining") - elseif count_unique(labelsT1,labelsT2) == 1 && - count_common(labelsT1,labelsT2) > 1 + elseif count_common(labelsT1,labelsT2) == 1 + # This is the case of Index replacement or + # uncombining + # TODO: handle the case where inds(R) and inds(T1) + # are not ordered the same? + # Could just use a permutedims... + return Tensor(store(T2),inds(R)) + elseif is_combiner(labelsT1,labelsT2) # This is the case of combining - println("combining") + cpos1,cposR = intersect_positions(labelsT1,labelsR) + labels_comb = deleteat(labelsT1,cpos1) + labels_perm = insertat(labelsR,labels_comb,cposR) + perm = getperm(labels_perm,labelsT2) + T2p = reshape(R,permute(inds(T2),perm)) + permutedims!(T2p,T2,perm) + R = reshape(T2p,inds(R)) end return R end diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 74c8a02df3..355300419e 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -235,7 +235,10 @@ function contract(T1::Tensor{<:Any,N1}, labelsT1, T2::Tensor{<:Any,N2}, labelsT2) where {N1,N2} - indsR,labelsR = contract_inds(inds(T1),labelsT1,inds(T2),labelsT2) + # TODO: put the contract_inds logic into contraction_output, + # call like R = contraction_ouput(T1,labelsT1,T2,labelsT2) + indsR,labelsR = contract_inds(inds(T1),labelsT1, + inds(T2),labelsT2) R = contraction_output(typeof(T1),typeof(T2),indsR) # contract!! version here since the output R may not # be mutable (like UniformDiag) diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index d09d234cf4..9ca029a50a 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -20,6 +20,10 @@ store(T::Tensor) = T.store inds(T::Tensor) = T.inds ind(T::Tensor,j::Integer) = inds(T)[j] +# +# Tools for working with Dims/Tuples +# + # dim and dims are used in the Tensor interface, overload # base Dims here dims(ds::Dims) = ds @@ -43,6 +47,22 @@ StaticArrays.similar_type(::Type{IndsT},::Val{N}) where {IndsT<:Dims,N} = Dims{N unioninds(is1::Dims{N1},is2::Dims{N2}) where {N1,N2} = Dims{N1+N2}((is1...,is2...)) +function deleteat(t::NTuple{N},pos::Int) where {N} + return ntuple(i -> i < pos ? t[i] : t[i+1],Val(N-1)) +end + +function insertat(t::NTuple{N}, + val::NTuple{M}, + pos::Int) where {N,M} + return ntuple(i -> i < pos ? t[i] : + ( i > pos+M-1 ? t[i-1] : + val[i-pos+1] ), Val(N+M-1)) +end + +# +# Generic Tensor functions +# + # The size is obtained from the indices dims(T::Tensor) = dims(inds(T)) dim(T::Tensor) = dim(inds(T)) diff --git a/test/test_combiner.jl b/test/test_combiner.jl index 0979d4ca00..11f1b3f235 100644 --- a/test/test_combiner.jl +++ b/test/test_combiner.jl @@ -1,10 +1,12 @@ using ITensors, Test using Combinatorics: permutations +@testset "Combiner" begin + i = Index(2,"i") -j = Index(2,"j") -k = Index(2,"k") -l = Index(2,"l") +j = Index(3,"j") +k = Index(4,"k") +l = Index(5,"l") A = randomITensor(i, j, k, l) @@ -12,11 +14,11 @@ A = randomITensor(i, j, k, l) for inds_ij ∈ permutations([i,j]) C,c = combiner(inds_ij...) 
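# Self-contained sketch of the NTuple splicing helpers added to tensor.jl
# above: `deleteat` drops one position, `insertat` replaces the element at
# `pos` with an M-tuple (result length N+M-1). Note the trailing branch here
# steps back by M-1 positions; if I read the indexing right, the `t[i-1]` in
# the patch is only correct for the M == 2 case.
deleteat(t::NTuple{N}, pos::Int) where {N} =
    ntuple(i -> i < pos ? t[i] : t[i+1], Val(N-1))

insertat(t::NTuple{N}, val::NTuple{M}, pos::Int) where {N,M} =
    ntuple(i -> i < pos ? t[i] :
                (i > pos + M - 1 ? t[i-M+1] : val[i-pos+1]), Val(N+M-1))

deleteat((:a, :b, :c), 2)            # (:a, :c)
insertat((:a, :d), (:x, :y, :z), 1)  # (:x, :y, :z, :d)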
B = A*C - @test hasinds(B, l, k) + @test hasinds(B, l, k, c) @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end for inds_il ∈ permutations([i,l]) C,c = combiner(inds_il...) @@ -25,7 +27,7 @@ A = randomITensor(i, j, k, l) @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end for inds_ik ∈ permutations([i,k]) C,c = combiner(inds_ik...) @@ -34,7 +36,7 @@ A = randomITensor(i, j, k, l) @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end for inds_jk ∈ permutations([j,k]) C,c = combiner(inds_jk...) @@ -43,7 +45,7 @@ A = randomITensor(i, j, k, l) @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end for inds_jl ∈ permutations([j,l]) C,c = combiner(inds_jl...) @@ -52,7 +54,7 @@ A = randomITensor(i, j, k, l) @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end for inds_kl ∈ permutations([k,l]) C,c = combiner(inds_kl...) @@ -61,7 +63,7 @@ A = randomITensor(i, j, k, l) @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end end @@ -73,7 +75,7 @@ end @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end for inds_ijk ∈ permutations([i,j,k]) C,c = combiner(inds_ijk...) @@ -82,7 +84,7 @@ end @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end for inds_jkl ∈ permutations([j,k,l]) C,c = combiner(inds_jkl...) @@ -91,6 +93,9 @@ end @test c == commonindex(B, C) D = B*C @test hasinds(D, i, j, k, l) - @test Array(permute(D, i, j, k, l)) == Array(A) + @test D ≈ A end end + +end + diff --git a/test/test_trg.jl b/test/test_trg.jl index a33ac77243..fb1f541f1d 100644 --- a/test/test_trg.jl +++ b/test/test_trg.jl @@ -53,10 +53,16 @@ function trg(T::ITensor, horiz_inds, vert_inds; Fu = addtags(Fu,"up","renorm") Fd = addtags(Fd,"down","renorm") - T = replacetags(replacetags(Fl,"down","dnleft","orig"),"right","upleft","orig")* - replacetags(replacetags(Fu,"left","upleft","orig"),"down","upright","orig")* - replacetags(replacetags(Fr,"up","upright","orig"),"left","dnright","orig")* - replacetags(replacetags(Fd,"right","dnright","orig"),"up","dnleft","orig") + Fl = replacetags(replacetags(Fl,"down","dnleft","orig"), + "right","upleft","orig") + Fu = replacetags(replacetags(Fu,"left","upleft","orig"), + "down","upright","orig") + Fr = replacetags(replacetags(Fr,"up","upright","orig"), + "left","dnright","orig") + Fd = replacetags(replacetags(Fd,"right","dnright","orig"), + "up","dnleft","orig") + + T = Fl*Fu*Fr*Fd T = replacetags(T,"renorm","orig") l = findindex(T,"left") @@ -64,7 +70,7 @@ function trg(T::ITensor, horiz_inds, vert_inds; u = findindex(T,"up") d = findindex(T,"down") - trT = abs(scalar(T*δ(l,r)*δ(u,d))) + trT = abs((T*δ(l,r)*δ(u,d))[]) T = T/trT κ *= trT^(1.0/2^n) end From 7ac5f198f0ae3ed1c9f68294a35dc75ddeab8f15 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Fri, 20 Sep 2019 15:29:06 -0400 Subject: [PATCH 15/27] Get most tests passing. 
DMRG is currently hanging --- src/decomp.jl | 4 ++-- src/mps/mpo.jl | 8 ++++---- src/tensor/dense.jl | 10 +++++++--- test/runtests.jl | 2 +- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/decomp.jl b/src/decomp.jl index aa8fc37dd1..1ae6d0cdce 100644 --- a/src/decomp.jl +++ b/src/decomp.jl @@ -181,7 +181,7 @@ function eigenHermitian(A::ITensor, tags::TagSet = get(kwargs,:tags,"Link,eigen") lefttags::TagSet = get(kwargs,:lefttags,tags) righttags::TagSet = get(kwargs,:righttags,prime(tags)) - Lis = commoninds(inds(A),IndexSet(Linds...)) + Lis = commoninds(inds(A),IndexSet(Linds)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) UT,DT = eigenHermitian(Tensor(A),Lpos,Rpos;kwargs...) @@ -201,7 +201,7 @@ function eigen(A::ITensor, Rinds=prime(IndexSet(Linds)); kwargs...) tags::TagSet = get(kwargs,:tags,"Link,eigen") - Lis = commoninds(inds(A),IndexSet(Linds...)) + Lis = commoninds(inds(A),IndexSet(Linds)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) UT,DT = eigen(Tensor(A),Lpos,Rpos;kwargs...) diff --git a/src/mps/mpo.jl b/src/mps/mpo.jl index ebb26a9d70..f309e45600 100644 --- a/src/mps/mpo.jl +++ b/src/mps/mpo.jl @@ -258,8 +258,8 @@ function sum(A::T, B::T; kwargs...) where {T <: Union{MPS, MPO}} lAs = [linkindex(A, i) for i in 1:n-1] prime!(A, rand_plev, "Link") - first = fill(ITensor(), n) - second = fill(ITensor(), n) + first = Vector{ITensor{2}}(undef,n-1) + second = Vector{ITensor{2}}(undef,n-1) for i in 1:n-1 lA = linkindex(A, i) lB = linkindex(B, i) @@ -316,7 +316,7 @@ function densityMatrixApplyMPO(A::MPO, psi::MPS; kwargs...)::MPS ts = tags(commonindex(psi[n], psi[n-1])) Lis = commonindex(ρ, A[n]) Ris = uniqueinds(ρ, Lis) - FU, D = eigen(ρ, Lis, Ris; tags=ts, kwargs...) + FU, D = eigenHermitian(ρ, Lis, Ris; tags=ts, kwargs...) psi_out[n] = setprime(dag(FU), 0, "Site") O = O * FU * psi[n-1] * A[n-1] O = prime(O, -1, "Site") @@ -326,7 +326,7 @@ function densityMatrixApplyMPO(A::MPO, psi::MPS; kwargs...)::MPS ts = tags(commonindex(psi[j], psi[j-1])) Lis = IndexSet(commonindex(ρ, A[j]), commonindex(ρ, psi_out[j+1])) Ris = uniqueinds(ρ, Lis) - FU, D = eigen(ρ, Lis, Ris; tags=ts, kwargs...) + FU, D = eigenHermitian(ρ, Lis, Ris; tags=ts, kwargs...) psi_out[j] = dag(FU) O = O * FU * psi[j-1] * A[j-1] O = prime(O, -1, "Site") diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 355300419e..ebc3b92ded 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -550,8 +550,10 @@ function LinearAlgebra.qr(T::DenseTensor{<:Number,N,IndsT}, QM,RM = qr(M) q = ind(QM,2) r = ind(RM,1) + # TODO: simplify this by permuting inds(T) by (Lpos,Rpos) + # then grab Linds,Rinds Linds = similar_type(IndsT,Val(NL))(ntuple(i->inds(T)[Lpos[i]],Val(NL))) - Qinds = push(Linds,q) + Qinds = push(Linds,r) Q = reshape(QM,Qinds) Rinds = similar_type(IndsT,Val(NR))(ntuple(i->inds(T)[Rpos[i]],Val(NR))) Rinds = pushfirst(Rinds,r) @@ -561,10 +563,12 @@ end function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} + # TODO: just call qr on T directly (make sure + # that is fast) QM,RM = qr(Matrix(T)) - dim = size(QM,2) # Make the new indices to go onto Q and R - q = eltype(IndsT)(dim) + q,r = inds(T) + q = dim(q) < dim(r) ? 
sim(q) : sim(r) Qinds = IndsT((ind(T,1),q)) Rinds = IndsT((q,ind(T,2))) Q = Tensor(Dense{ElT}(vec(Matrix(QM))),Qinds) diff --git a/test/runtests.jl b/test/runtests.jl index 038e880471..a7f67c739b 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -12,7 +12,7 @@ using ITensors, Test "test_combiner.jl", "test_trg.jl", "test_ctmrg.jl", - "test_dmrg.jl", + #"test_dmrg.jl", "test_siteset.jl", "test_phys_sitesets.jl", "test_decomp.jl", From 4765caca87a1f1453ca88f9ac0eb1c7b1469c992 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Fri, 20 Sep 2019 16:08:35 -0400 Subject: [PATCH 16/27] Some code cleanup --- src/indexset.jl | 42 +++++++++++++ src/tensor/combiner.jl | 120 +----------------------------------- src/tensor/dense.jl | 2 - src/tensor/diag.jl | 2 - src/tensor/tensorstorage.jl | 63 +------------------ test/test_indexset.jl | 8 +-- 6 files changed, 49 insertions(+), 188 deletions(-) diff --git a/src/indexset.jl b/src/indexset.jl index bfe6441a8c..413456c90e 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -623,3 +623,45 @@ function is_trivial_permutation(P) return true end +function count_unique(labelsT1,labelsT2) + count = 0 + for l1 ∈ labelsT1 + l1 ∉ labelsT2 && (count += 1) + end + return count +end + +function count_common(labelsT1,labelsT2) + count = 0 + for l1 ∈ labelsT1 + l1 ∈ labelsT2 && (count += 1) + end + return count +end + +function intersect_positions(labelsT1,labelsT2) + for i1 = 1:length(labelsT1) + for i2 = 1:length(labelsT2) + if labelsT1[i1] == labelsT2[i2] + return i1,i2 + end + end + end + return nothing +end + +function is_replacement(labelsT1,labelsT2) + return count_unique(labelsT1,labelsT2) == 1 && + count_common(labelsT1,labelsT2) == 1 +end + +function is_combiner(labelsT1,labelsT2) + return count_unique(labelsT1,labelsT2) == 1 && + count_common(labelsT1,labelsT2) > 1 +end + +function is_uncombiner(labelsT1,labelsT2) + return count_unique(labelsT1,labelsT2) > 1 && + count_common(labelsT1,labelsT2) == 1 +end + diff --git a/src/tensor/combiner.jl b/src/tensor/combiner.jl index b440836858..c6c8778191 100644 --- a/src/tensor/combiner.jl +++ b/src/tensor/combiner.jl @@ -3,6 +3,8 @@ export Combiner struct Combiner <: TensorStorage end +data(::Combiner) = error("Combiner storage has no data") + Base.eltype(::Type{<:Combiner}) = Nothing Base.eltype(::StoreT) where {StoreT<:Combiner} = eltype(StoreT) @@ -38,48 +40,6 @@ function contraction_output(TensorT1::Type{<:DenseTensor}, return contraction_output(TensorT2,TensorT1,indsR) end -function count_unique(labelsT1,labelsT2) - count = 0 - for l1 ∈ labelsT1 - l1 ∉ labelsT2 && (count += 1) - end - return count -end - -function count_common(labelsT1,labelsT2) - count = 0 - for l1 ∈ labelsT1 - l1 ∈ labelsT2 && (count += 1) - end - return count -end - -function intersect_positions(labelsT1,labelsT2) - for i1 = 1:length(labelsT1) - for i2 = 1:length(labelsT2) - if labelsT1[i1] == labelsT2[i2] - return i1,i2 - end - end - end - return nothing -end - -function is_replacement(labelsT1,labelsT2) - return count_unique(labelsT1,labelsT2) == 1 && - count_common(labelsT1,labelsT2) == 1 -end - -function is_combiner(labelsT1,labelsT2) - return count_unique(labelsT1,labelsT2) == 1 && - count_common(labelsT1,labelsT2) > 1 -end - -function is_uncombiner(labelsT1,labelsT2) - return count_unique(labelsT1,labelsT2) > 1 && - count_common(labelsT1,labelsT2) == 1 -end - function contract!!(R::Tensor{<:Number,NR}, labelsR::NTuple{NR}, T1::CombinerTensor{<:Any,N1}, @@ -120,79 +80,3 @@ function contract!!(R::Tensor{<:Number,NR}, return 
contract!!(R,labelsR,T2,labelsT2,T1,labelsT1) end -#function storage_contract(CSstore::CombinerStorage, -# Cis::IndexSet, -# Dstore::Dense, -# dis::IndexSet) -# cind = Cis[1] -# if hasindex(dis, cind) # has combined index, uncombine -# cpos = indexposition(dis,cind) -# dinds = inds(dis) -# Cinds = inds(Cis) -# Nis = IndexSet(vcat(dinds[1:cpos-1], Cinds[2:end], dinds[cpos+1:end])) -# return Nis, Dstore -# else # lacks combined index, combine -# # dis doesn't have cind, replace -# # Cis[1], Cis[2], ... with cind, may need to permute -# j_1_pos = indexposition(dis,Cis[2]) -# if isnothing(j_1_pos) -# throw(ArgumentError("tensor missing index $(Cis[2]) in combiner-tensor product")) -# end -# -# # Check if Cis[2], Cis[3], ... are grouped together (contiguous) -# # and in same order as on combiner -# contig_sameord = true -# j = j_1_pos+1 -# for c=3:length(Cis) -# if j > length(dis) || (dis[j] != Cis[c]) -# contig_sameord = false -# break -# end -# j += 1 -# end -# -# if contig_sameord -# lend = j_1_pos-1 -# rstart = j_1_pos+length(Cis)-1 -# dinds = inds(dis) -# Nis = IndexSet(vcat(dinds[1:lend], cind, dinds[rstart:end])) -# else # permutation required -# # Set P destination values to -1 to mark indices that need to be assigned destinations: -# P = fill(-1, length(dis)) -# # permute combined indices to the front, in same order as in Cis: -# ni = 1 -# for c in 2:length(Cis) -# j = indexposition(dis, Cis[c]) -# if isnothing(j) -# throw(ArgumentError("tensor missing index $(Cis[c]) in combiner-tensor product")) -# end -# P[j] = ni -# ni += 1 -# end -# -# Nis = IndexSet(length(dis)+2-length(Cis)) -# Nis[1] = cind -# i = 2 -# for j in 1:length(dis) -# if P[j] == -1 -# P[j] = ni -# ni += 1 -# Nis[i] = dis[j] -# i += 1 -# end -# end -# -# ddata = vec(permutedims(reshape(data(Dstore), dims(dis)), invperm(P))) -# Dstore = Dense{eltype(Dstore)}(ddata) -# end -# return Nis, Dstore -# end -#end -# -#function storage_contract(Dstore::Dense, -# dis::IndexSet, -# CSstore::CombinerStorage, -# Cis::IndexSet) -# return storage_contract(CSstore, Cis, Dstore, dis) -#end - diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index ebc3b92ded..cea1613657 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -12,8 +12,6 @@ struct Dense{T} <: TensorStorage Dense{T}() where {T} = new{T}(Vector{T}()) end -data(D::Dense) = D.data - # Convenient functions for Dense storage type Base.getindex(D::Dense,i::Integer) = data(D)[i] Base.setindex!(D::Dense,v,i::Integer) = (data(D)[i] = v) diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl index a86bd5c09a..23f104842e 100644 --- a/src/tensor/diag.jl +++ b/src/tensor/diag.jl @@ -8,8 +8,6 @@ struct Diag{T} <: TensorStorage Diag{T}(data) where {T} = new{T}(data) end -data(D::Diag) = D.data - const NonuniformDiag{T} = Diag{T} where {T<:AbstractVector} const UniformDiag{T} = Diag{T} where {T<:Number} diff --git a/src/tensor/tensorstorage.jl b/src/tensor/tensorstorage.jl index 928b62fe0a..360f0493dc 100644 --- a/src/tensor/tensorstorage.jl +++ b/src/tensor/tensorstorage.jl @@ -2,66 +2,5 @@ export data abstract type TensorStorage end -# -# Generic ITensor storage functions -# - -#storage_randn!(S::TensorStorage) = randn!(data(S)) -#storage_norm(S::TensorStorage) = norm(data(S)) -#storage_conj(S::T) where {T<:TensorStorage}= T(conj(data(S))) - -# -# This high level part of contract is generic to different -# storage types -# -# TODO: make this storage_contract!(), where C is pre-allocated. 
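# Sketch of the storage-accessor pattern consolidated above: a single generic
# `data` reads the `data` field of any storage subtype, and storage types that
# carry no data (the combiner) override it with an informative error.
# Stand-alone restatement with illustrative type names:
abstract type AbstractStorage end

struct DenseStore{T} <: AbstractStorage
    data::Vector{T}
end
struct CombinerStore <: AbstractStorage end

data(S::AbstractStorage) = S.data
data(::CombinerStore) = error("Combiner storage has no data")

data(DenseStore([1.0, 2.0]))  # [1.0, 2.0]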
-# This will allow for in-place multiplication -# TODO: optimize the contraction logic so C doesn't get permuted? -# -#function storage_contract(Astore::TensorStorage, -# Ais::IndexSet, -# Bstore::TensorStorage, -# Bis::IndexSet) -# if length(Ais)==0 -# Cis = Bis -# Cstore = storage_scalar(Astore)*Bstore -# elseif length(Bis)==0 -# Cis = Ais -# Cstore = storage_scalar(Bstore)*Astore -# else -# #TODO: check for special case when Ais and Bis are disjoint sets -# #I think we should do this analysis outside of storage_contract, at the ITensor level -# #(since it is universal for any storage type and just analyzes in indices) -# (Alabels,Blabels) = compute_contraction_labels(Ais,Bis) -# if is_outer(Alabels,Blabels) -# Cstore,Cis = storage_outer(Astore,Ais,Bstore,Bis) -# else -# (Cis,Clabels) = contract_inds(Ais,Alabels,Bis,Blabels) -# Cstore = _contract(Cis,Clabels,Astore,Ais,Alabels,Bstore,Bis,Blabels) -# end -# end -# return (Cis,Cstore) -#end - -# Generic outer function that handles proper -# storage promotion -# TODO: should this handle promotion with storage -# type switching? -# TODO: we should combine all of the storage_add! -# outer wrappers into a single call that promotes -# based on the storage type, i.e. promote_type(Dense,Diag) -> Dense -#function storage_add!(Bstore::BT, -# Bis::IndexSet, -# Astore::AT, -# Ais::IndexSet, -# x::Number = 1) where {BT<:TensorStorage,AT<:TensorStorage} -# NT = promote_type(AT,BT) -# if NT == BT -# _add!(Bstore,Bis,Astore,Ais, x) -# return Bstore -# end -# Nstore = storage_convert(NT,Bstore,Bis) -# _add!(Nstore,Bis,Astore,Ais, x) -# return Nstore -#end +data(S::TensorStorage) = S.data diff --git a/test/test_indexset.jl b/test/test_indexset.jl index 58000ab93a..943d552ec8 100644 --- a/test/test_indexset.jl +++ b/test/test_indexset.jl @@ -43,24 +43,24 @@ using ITensors, I3 = IndexSet(j,l) @test hassameinds(I1,(k,j,i)) @test uniqueindex(I1,(I2,I3)) == i - @test uniqueindex(I1,IndexSet(k, j, i)) === nothing + @test isnothing(uniqueindex(I1,IndexSet(k, j, i))) @test uniqueinds(I1,I2) == IndexSet(i,j) @test setdiff(I1,I2) == IndexSet(i,j) @test hassameinds(uniqueinds(I1,I2),(j,i)) @test commoninds(I1,I2) == IndexSet(k) @test commonindex(I1,I2) == k - @test commonindex(I1,IndexSet(l)) === nothing + @test isnothing(commonindex(I1,IndexSet(l))) @test commoninds(I1,(j,l)) == IndexSet(j) @test commonindex(I1,(j,l)) == j @test commoninds(I1,(j,k)) == IndexSet(j,k) @test hassameinds(commoninds(I1,(j,k,l)),(j,k)) @test findinds(I1,"i") == IndexSet(i) @test findindex(I1,"j") == j - @test findindex(I1,"l") === nothing + @test isnothing(findindex(I1,"l")) @test indexposition(I1,i) == 1 @test indexposition(I1,j) == 2 @test indexposition(I1,k) == 3 - @test indexposition(I1,Index(2)) === nothing + @test isnothing(indexposition(I1,Index(2))) end @testset "commoninds index ordering" begin I = IndexSet(i,k,j) From 13a402b3669a6bb60835e7f67ca18fe941603661 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Fri, 20 Sep 2019 16:36:28 -0400 Subject: [PATCH 17/27] More code cleanup --- src/tensor/contraction_logic.jl | 102 -------------------------------- 1 file changed, 102 deletions(-) diff --git a/src/tensor/contraction_logic.jl b/src/tensor/contraction_logic.jl index 34e741097d..1cc86cf81e 100644 --- a/src/tensor/contraction_logic.jl +++ b/src/tensor/contraction_logic.jl @@ -228,7 +228,6 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) bind = props.Bcstart for i = 1:props.ncont while !contractedB(props,bind) bind += 1 end - #j = 
find_index(props.ai,props.bi[bind]) j = findfirst(==(props.bi[bind]),props.ai) newi += 1 props.PA[newi] = j @@ -240,7 +239,6 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) #appear in same order as on C #TODO: check this is correct for 1-indexing for k = 1:rc - #j = find_index(props.ai,props.ci[k]) j = findfirst(==(props.ci[k]),props.ai) if !isnothing(j) props.AtoC[newi] = k @@ -290,7 +288,6 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) aind = props.Acstart for i = 0:(props.ncont-1) while !contractedA(props,aind) aind += 1 end - #j = find_index(props.bi,props.ai[aind]) j = findfirst(==(props.ai[aind]),props.bi) newi += 1 props.PB[newi] = j @@ -302,7 +299,6 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) #Permute uncontracted indices to #appear in same order as on C for k = 1:rc - #j = find_index(props.bi,props.ci[k]) j = findfirst(==(props.ci[k]),props.bi) if !isnothing(j) props.BtoC[newi] = k @@ -394,101 +390,3 @@ function compute_contraction_properties!(props::ContractionProperties, A, B, C) end -#function _contract_dense_dense!(C::Array{T}, -# p::CProps, -# A::Array{T}, -# B::Array{T}, -# α::T=one(T), -# β::T=zero(T)) where {T} -# tA = 'N' -# if p.permuteA -# aref = reshape(permutedims(A,p.PA),p.dmid,p.dleft) -# tA = 'T' -# else -# #A doesn't have to be permuted -# if Atrans(p) -# aref = reshape(A,p.dmid,p.dleft) -# tA = 'T' -# else -# aref = reshape(A,p.dleft,p.dmid) -# end -# end -# -# tB = 'N' -# if p.permuteB -# bref = reshape(permutedims(B,p.PB),p.dmid,p.dright) -# else -# if Btrans(p) -# bref = reshape(B,p.dright,p.dmid) -# tB = 'T' -# else -# bref = reshape(B,p.dmid,p.dright) -# end -# end -# -# # TODO: this logic may be wrong -# if p.permuteC -# cref = reshape(copy(C),p.dleft,p.dright) -# else -# if Ctrans(p) -# cref = reshape(C,p.dleft,p.dright) -# if tA=='N' && tB=='N' -# (aref,bref) = (bref,aref) -# tA = tB = 'T' -# elseif tA=='T' && tB=='T' -# (aref,bref) = (bref,aref) -# tA = tB = 'N' -# end -# else -# cref = reshape(C,p.dleft,p.dright) -# end -# end -# -# #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),aref,bref,promote_type(T,Tβ)(β),cref) -# BLAS.gemm!(tA,tB,α,aref,bref,β,cref) -# -# if p.permuteC -# permutedims!(C,reshape(cref,p.newCrange...),p.PC) -# end -# return -#end - -#TODO: this should be optimized -#function _contract_scalar!(Cdata::Array,Clabels::Vector{Int}, -# Bdata::Array,Blabels::Vector{Int},α,β) -# p = calculate_permutation(Blabels,Clabels) -# if β==0 -# if is_trivial_permutation(p) -# Cdata .= α.*Bdata -# else -# #TODO: make an optimized permutedims!() that also scales the data -# permutedims!(Cdata,α*Bdata) -# end -# else -# if is_trivial_permutation(p) -# Cdata .= α.*Bdata .+ β.*Cdata -# else -# #TODO: make an optimized permutedims!() that also adds and scales the data -# permBdata = permutedims(Bdata,p) -# Cdata .= α.*permBdata .+ β.*Cdata -# end -# end -# return -#end - -#function contract!(Cdata::Array{T},Clabels::Vector{Int}, -# Adata::Array{T},Alabels::Vector{Int}, -# Bdata::Array{T},Blabels::Vector{Int}, -# α::T=one(T),β::T=zero(T)) where {T} -# if(length(Alabels)==0) -# contract_scalar!(Cdata,Clabels,Bdata,Blabels,α*Adata[1],β) -# elseif(length(Blabels)==0) -# contract_scalar!(Cdata,Clabels,Adata,Alabels,α*Bdata[1],β) -# else -# props = CProps(Alabels,Blabels,Clabels) -# compute!(props,Adata,Bdata,Cdata) -# contract!(Cdata,props,Adata,Bdata,α,β) -# end -# return -#end - From 0c15d3212936f230bbd92ef48396239e15c4c803 Mon Sep 17 00:00:00 2001 From: 
Matthew Fishman Date: Mon, 30 Sep 2019 15:38:57 -0400 Subject: [PATCH 18/27] Get tests passing again. Change Tensor -> tensor for creating a Tensor view of an ITensor. --- src/decomp.jl | 14 ++--- src/itensor.jl | 109 +++++++++++++++++++---------------- src/mps/mpo.jl | 2 +- src/tensor/dense.jl | 92 +++++++++++++++++++----------- src/tensor/tensor.jl | 17 ++++-- test/test_contract.jl | 88 ++++++++++++++-------------- test/test_itensor_dense.jl | 49 +++++++--------- test/test_mpo.jl | 26 ++++----- test/test_phys_sitesets.jl | 114 ++++++++++++++++++------------------- 9 files changed, 275 insertions(+), 236 deletions(-) diff --git a/src/decomp.jl b/src/decomp.jl index 1ae6d0cdce..90ae5b70e7 100644 --- a/src/decomp.jl +++ b/src/decomp.jl @@ -10,7 +10,7 @@ function qr(A::ITensor, Lis = commoninds(inds(A),IndexSet(Linds...)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) - QT,RT = qr(Tensor(A),Lpos,Rpos) + QT,RT = qr(tensor(A),Lpos,Rpos) Q,R = ITensor(QT),ITensor(RT) q = commonindex(Q,R) settags!(Q,tags,q) @@ -26,7 +26,7 @@ function polar(A::ITensor, Lis = commoninds(inds(A),IndexSet(Linds...)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) - UT,PT = polar(Tensor(A),Lpos,Rpos) + UT,PT = polar(tensor(A),Lpos,Rpos) U,P = ITensor(UT),ITensor(PT) u = commoninds(U,P) p = uniqueinds(P,U) @@ -67,7 +67,7 @@ function svd(A::ITensor, Lis = commoninds(inds(A),IndexSet(Linds...)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) - UT,ST,VT = svd(Tensor(A),Lpos,Rpos;kwargs...) + UT,ST,VT = svd(tensor(A),Lpos,Rpos;kwargs...) U,S,V = ITensor(UT),ITensor(ST),ITensor(VT) u₀ = commonindex(U,S) v₀ = commonindex(S,V) @@ -175,7 +175,7 @@ function factorize(A::ITensor, end function eigenHermitian(A::ITensor, - Linds=findinds(A,"0"), + Linds=findinds(A,("",0)), Rinds=prime(IndexSet(Linds)); kwargs...) tags::TagSet = get(kwargs,:tags,"Link,eigen") @@ -184,7 +184,7 @@ function eigenHermitian(A::ITensor, Lis = commoninds(inds(A),IndexSet(Linds)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) - UT,DT = eigenHermitian(Tensor(A),Lpos,Rpos;kwargs...) + UT,DT = eigenHermitian(tensor(A),Lpos,Rpos;kwargs...) U,D = ITensor(UT),ITensor(DT) u = commonindex(U,D) settags!(U,lefttags,u) @@ -197,14 +197,14 @@ end import LinearAlgebra.eigen function eigen(A::ITensor, - Linds=findinds(A,"0"), + Linds=findinds(A,("",0)), Rinds=prime(IndexSet(Linds)); kwargs...) tags::TagSet = get(kwargs,:tags,"Link,eigen") Lis = commoninds(inds(A),IndexSet(Linds)) Ris = uniqueinds(inds(A),Lis) Lpos,Rpos = getperms(inds(A),Lis,Ris) - UT,DT = eigen(Tensor(A),Lpos,Rpos;kwargs...) + UT,DT = eigen(tensor(A),Lpos,Rpos;kwargs...) U,D = ITensor(UT),ITensor(DT) u = commonindex(U,D) settags!(U,tags,u) diff --git a/src/itensor.jl b/src/itensor.jl index b90dec5d58..94fcc70d58 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -16,6 +16,10 @@ export ITensor, permute, randomITensor, diagITensor, + tensor, + array, + matrix, + vector, scalar, store, dense @@ -47,8 +51,11 @@ ITensor() = ITensor(Dense{Nothing}(),IndexSet()) ITensor(is::IndexSet) = ITensor(Float64,is...) ITensor(inds::Index...) = ITensor(IndexSet(inds...)) -# TODO: add versions where the types can be specified -Tensor(A::ITensor) = Tensor(store(A),inds(A)) +# Convert the ITensor to a Tensor that shares the same +# data and indices as the ITensor +# TODO: should we define a `convert(::Type{<:Tensor},::ITensor)` +# method? 
+tensor(A::ITensor) = Tensor(store(A),inds(A)) function ITensor(::Type{T}, inds::IndexSet) where {T<:Number} @@ -243,31 +250,29 @@ Base.size(A::ITensor{N}, d::Int) where {N} = d in 1:N ? dim(inds(A)[d]) : isNull(T::ITensor) = (eltype(T) === Nothing) -Base.copy(T::ITensor{N}) where {N} = ITensor{N}(copy(Tensor(T)))::ITensor{N} +Base.copy(T::ITensor{N}) where {N} = ITensor{N}(copy(tensor(T))) # TODO: make versions where the element type can be specified -# Should this be array? (Version that makes a view) -Base.Array(T::ITensor) = Array(Tensor(T)) +# Should this be called `array`? (Version that makes a view, if dense) +array(T::ITensor) = array(tensor(T)) -Base.Matrix(T::ITensor{N}) where {N} = (N==2 ? Array(Tensor(T)) : throw(DimensionMismatch("ITensor must be order 2 to convert to a Matrix"))) +matrix(T::ITensor{N}) where {N} = (N==2 ? array(tensor(T)) : throw(DimensionMismatch("ITensor must be order 2 to convert to a Matrix"))) -Base.Vector(T::ITensor{N}) where {N} = (N==1 ? Array(Tensor(T)) : throw(DimensionMismatch("ITensor must be order 1 to convert to a Vector"))) +vector(T::ITensor{N}) where {N} = (N==1 ? array(tensor(T)) : throw(DimensionMismatch("ITensor must be order 1 to convert to a Vector"))) scalar(T::ITensor) = T[] -#Array(T::ITensor) = storage_convert(Array,store(T),inds(T)) - -function Base.Array(T::ITensor{N},is::Vararg{Index,N}) where {N} +function array(T::ITensor{N},is::Vararg{Index,N}) where {N} perm = getperm(inds(T),is) - return Array(permutedims(Tensor(T),perm)) + return array(permutedims(tensor(T),perm)) end -function Base.Matrix(T::ITensor{N},i1::Index,i2::Index) where {N} +function matrix(T::ITensor{N},i1::Index,i2::Index) where {N} N≠2 && throw(DimensionMismatch("ITensor must be order 2 to convert to a Matrix")) - return Array(T,i1,i2) + return array(T,i1,i2) end -Base.getindex(T::ITensor{N},vals::Vararg{Int,N}) where {N} = Tensor(T)[vals...] +Base.getindex(T::ITensor{N},vals::Vararg{Int,N}) where {N} = tensor(T)[vals...] function Base.getindex(T::ITensor{N}, ivs::Vararg{IndexVal,N}) where {N} @@ -284,9 +289,9 @@ end # return Tensor(store(T),inds(T))[vals...] #end -Base.getindex(T::ITensor) = Tensor(T)[] +Base.getindex(T::ITensor) = tensor(T)[] -Base.setindex!(T::ITensor{N},x::Number,vals::Vararg{Int,N}) where {N} = (Tensor(T)[vals...] = x) +Base.setindex!(T::ITensor{N},x::Number,vals::Vararg{Int,N}) where {N} = (tensor(T)[vals...] = x) function Base.setindex!(T::ITensor,x::Number,ivs::IndexVal...) p = getperm(inds(T),ivs) @@ -308,7 +313,7 @@ end function Base.fill!(T::ITensor, x::Number) # TODO: automatically switch storage type if needed? - Tensor(T) .= x + tensor(T) .= x return T end @@ -376,23 +381,29 @@ end # so it may be nice to have a seperate scalar(T) for when dim(T)==1 #function scalar(T::ITensor) # !(order(T)==0 || dim(T)==1) && throw(ArgumentError("ITensor with inds $(inds(T)) is not a scalar")) -# return scalar(Tensor(store(T),inds(T))) +# return scalar(tensor(store(T),inds(T))) #end function Random.randn!(T::ITensor) - return randn!(Tensor(store(T),inds(T))) + return randn!(tensor(T)) end const Indices = Union{IndexSet,Tuple{Vararg{Index}}} -function randomITensor(::Type{S},inds::Indices) where {S<:Number} +function randomITensor(::Type{S}, + inds::Indices) where {S<:Number} T = ITensor(S,IndexSet(inds)) randn!(T) return T end -randomITensor(::Type{S},inds::Index...) where {S<:Number} = randomITensor(S,IndexSet(inds...)) -randomITensor(inds::Indices) = randomITensor(Float64,IndexSet(inds)) -randomITensor(inds::Index...) 
= randomITensor(Float64,IndexSet(inds...)) +function randomITensor(::Type{S}, + inds::Index...) where {S<:Number} + return randomITensor(S,IndexSet(inds...)) +end +randomITensor(inds::Indices) = randomITensor(Float64, + IndexSet(inds)) +randomITensor(inds::Index...) = randomITensor(Float64, + IndexSet(inds...)) function combiner(inds::IndexSet; kwargs...) tags = get(kwargs, :tags, "CMB,Link") @@ -404,28 +415,28 @@ combiner(inds::Index...; kwargs...) = combiner(IndexSet(inds...); kwargs...) combinedindex(T::ITensor) = store(T) isa Combiner ? store(T).ci : nothing -LinearAlgebra.norm(T::ITensor) = norm(Tensor(T)) +LinearAlgebra.norm(T::ITensor) = norm(tensor(T)) function dag(T::ITensor) - TT = conj(Tensor(T)) + TT = conj(tensor(T)) return ITensor(store(TT),dag(inds(T))) end function permute(T::ITensor,new_inds) perm = getperm(new_inds,inds(T)) - TT = permutedims(Tensor(store(T),inds(T)),perm) - return ITensor(TT) + Tp = permutedims(tensor(T),perm) + return ITensor(Tp) end permute(T::ITensor,inds::Index...) = permute(T,IndexSet(inds...)) function Base.:*(T::ITensor,x::Number) - return ITensor(x*Tensor(T)) + return ITensor(x*tensor(T)) end Base.:*(x::Number,T::ITensor) = T*x #TODO: make a proper element-wise division Base.:/(A::ITensor,x::Number) = A*(1.0/x) -Base.:-(A::ITensor) = ITensor(-Tensor(A)) +Base.:-(A::ITensor) = ITensor(-tensor(A)) function Base.:+(A::ITensor,B::ITensor) C = copy(A) C = add!(C,B) @@ -437,9 +448,9 @@ function Base.:-(A::ITensor,B::ITensor) return C end -function *(A::ITensor,B::ITensor) +function Base.:*(A::ITensor,B::ITensor) (Alabels,Blabels) = compute_contraction_labels(inds(A),inds(B)) - CT = contract(Tensor(A),Alabels,Tensor(B),Blabels) + CT = contract(tensor(A),Alabels,tensor(B),Blabels) C = ITensor(CT) if warnTensorOrder > 0 && order(C) >= warnTensorOrder println("Warning: contraction resulted in ITensor with $(order(C)) indices") @@ -447,9 +458,7 @@ function *(A::ITensor,B::ITensor) return C end -dot(A::ITensor,B::ITensor) = (dag(A)*B)[] - -import LinearAlgebra.exp +LinearAlgebra.dot(A::ITensor,B::ITensor) = (dag(A)*B)[] """ exp(A::ITensor, Lis::IndexSet; hermitian = false) @@ -460,12 +469,14 @@ be defined. When `hermitian=true` the exponential of `Hermitian(reshape(A, dim(Lis), :))` is computed internally. 
""" -function exp(A::ITensor, Lis::IndexSet; hermitian = false) - (dim(Lis) == dim(inds(A))/dim(Lis)) || throw(DimensionMismatch("dimension of the left index set `Lis` must be - equal to `dim(inds(A))/dim(Lis)`")) - A, Lis, Ris = _permute_for_factorize(A,Lis) - expAs = storage_exp(store(A), Lis,Ris, hermitian = hermitian) - return ITensor(inds(A),expAs) +function LinearAlgebra.exp(A::ITensor, + Linds, + Rinds = prime(IndexSet(Linds)); + ishermitian = false) + Lis,Ris = IndexSet(Linds),IndexSet(Rinds) + Lpos,Rpos = getperms(inds(A),Lis,Ris) + expAT = exp(tensor(A),Lpos,Rpos;ishermitian=ishermitian) + return ITensor(expAT) end @@ -493,7 +504,7 @@ B .= A """ function Base.copyto!(R::ITensor{N},T::ITensor{N}) where {N} perm = getperm(inds(R),inds(T)) - TR = permutedims!(Tensor(R),Tensor(T),perm) + TR = permutedims!(tensor(R),tensor(T),perm) return ITensor(TR) end @@ -527,7 +538,7 @@ end function apply!(R::ITensor{N},T::ITensor{N},f::Function) where {N} perm = getperm(inds(R),inds(T)) - TR,TT = Tensor(R),Tensor(T) + TR,TT = tensor(R),tensor(T) # TODO: Include type promotion from α TR = convert(promote_type(typeof(TR),typeof(TT)),TR) @@ -561,7 +572,7 @@ A .*= x ``` """ function scale!(T::ITensor,x::Number) - TT = Tensor(T) + TT = tensor(T) TT .*= x return T end @@ -587,23 +598,23 @@ end # TODO: make a specialized printing from Diag # that emphasizes the missing elements -function show(io::IO,T::ITensor) +function Base.show(io::IO,T::ITensor) summary(io,T) print(io,"\n") if !isNull(T) - Base.print_array(io,Array(T)) + Base.print_array(io,array(T)) end end -function show(io::IO, - mime::MIME"text/plain", - T::ITensor) +function Base.show(io::IO, + mime::MIME"text/plain", + T::ITensor) summary(io,T) end function Base.similar(T::ITensor, element_type=eltype(T)) - return ITensor(similar(Tensor(T),element_type)) + return ITensor(similar(tensor(T),element_type)) end function multSiteOps(A::ITensor, diff --git a/src/mps/mpo.jl b/src/mps/mpo.jl index b1e6fcc39a..6b29a9170f 100644 --- a/src/mps/mpo.jl +++ b/src/mps/mpo.jl @@ -403,7 +403,7 @@ function naiveApplyMPO(A::MPO, psi::MPS; kwargs...)::MPS for b=1:(N-1) Al = commonindex(A[b],A[b+1]) pl = commonindex(psi[b],psi[b+1]) - C = combiner(Al,pl) + C,_ = combiner(Al,pl) psi_out[b] *= C psi_out[b+1] *= C end diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index cea1613657..01103a026c 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -58,9 +58,17 @@ Base.:*(x::Number,D::Dense) = D*x const DenseTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Dense} # Basic functionality for AbstractArray interface -Base.IndexStyle(::Type{TensorT}) where {TensorT<:DenseTensor} = IndexLinear() -Base.getindex(T::DenseTensor,i::Integer) = store(T)[i] -Base.setindex!(T::DenseTensor,v,i::Integer) = (store(T)[i] = v) +Base.IndexStyle(::Type{<:DenseTensor}) = IndexLinear() +Base.getindex(T::DenseTensor,i::Int) = store(T)[i] +Base.setindex!(T::DenseTensor,v,i::Int) = (store(T)[i] = v) + +# How does Julia map from IndexCartesian to IndexLinear? 
+#Base.getindex(T::DenseTensor{<:Number,N}, +# i::Vararg{Int,N}) where {N} = +#store(T)[sum(i.*strides(T))+1-sum(strides(T))] +#Base.setindex!(T::DenseTensor{<:Number,N}, +# v,i::Vararg{Int,N}) where {N} = +#(store(T)[sum(i.*strides(T))+1-sum(strides(T))] = v) # Get the specified value on the diagonal function getdiag(T::DenseTensor{<:Number,N},ind::Int) where {N} @@ -72,10 +80,12 @@ function setdiag!(T::DenseTensor{<:Number,N},val,ind::Int) where {N} T[CartesianIndex(ntuple(_->ind,Val(N)))] = val end +# This is for BLAS/LAPACK Base.strides(T::DenseTensor) = strides(inds(T)) -# Needed for passing Tensor{T,2} to BLAS -function Base.unsafe_convert(::Type{Ptr{ElT}},T::DenseTensor{ElT}) where {ElT} +# Needed for passing Tensor{T,2} to BLAS/LAPACK +function Base.unsafe_convert(::Type{Ptr{ElT}}, + T::DenseTensor{ElT}) where {ElT} return Base.unsafe_convert(Ptr{ElT},data(store(T))) end @@ -102,15 +112,15 @@ end # Create an Array that is a view of the Dense Tensor # Useful for using Base Array functions -Base.Array(T::DenseTensor) = reshape(data(store(T)),dims(inds(T))) -Base.Matrix(T::DenseTensor{<:Number,2}) = Array(T) -Base.Vector(T::DenseTensor{<:Number,1}) = Array(T) +array(T::DenseTensor) = reshape(data(store(T)),dims(inds(T))) +matrix(T::DenseTensor{<:Number,2}) = array(T) +vector(T::DenseTensor{<:Number,1}) = array(T) # TODO: call permutedims!(R,T,perm,(r,t)->t)? function Base.permutedims!(R::DenseTensor{<:Number,N}, T::DenseTensor{<:Number,N}, perm::NTuple{N,Int}) where {N} - permutedims!(Array(R),Array(T),perm) + permutedims!(array(R),array(T),perm) return R end @@ -210,7 +220,7 @@ end # TODO: call outer!!, make this generic function outer(T1::DenseTensor{ElT1}, T2::DenseTensor{ElT2}) where {ElT1,ElT2} - array_outer = vec(Array(T1))*transpose(vec(Array(T2))) + array_outer = vec(array(T1))*transpose(vec(array(T2))) inds_outer = unioninds(inds(T1),inds(T2)) return Tensor(Dense{promote_type(ElT1,ElT2)}(vec(array_outer)),inds_outer) end @@ -314,27 +324,27 @@ function _contract!(C::DenseTensor{El,NC}, props::ContractionProperties) where {El,NC,NA,NB} tA = 'N' if props.permuteA - aref = reshape(permutedims(A,NTuple{NA,Int}(props.PA)),props.dmid,props.dleft) + AM = reshape(permutedims(A,NTuple{NA,Int}(props.PA)),props.dmid,props.dleft) tA = 'T' else #A doesn't have to be permuted if Atrans(props) - aref = reshape(A,props.dmid,props.dleft) + AM = reshape(A,props.dmid,props.dleft) tA = 'T' else - aref = reshape(A,props.dleft,props.dmid) + AM = reshape(A,props.dleft,props.dmid) end end tB = 'N' if props.permuteB - bref = reshape(permutedims(B,NTuple{NB,Int}(props.PB)),props.dmid,props.dright) + BM = reshape(permutedims(B,NTuple{NB,Int}(props.PB)),props.dmid,props.dright) else if Btrans(props) - bref = reshape(B,props.dright,props.dmid) + BM = reshape(B,props.dright,props.dmid) tB = 'T' else - bref = reshape(B,props.dmid,props.dright) + BM = reshape(B,props.dmid,props.dright) end end @@ -342,30 +352,31 @@ function _contract!(C::DenseTensor{El,NC}, if props.permuteC # Need to copy here since we will be permuting # into C later - cref = reshape(copy(C),props.dleft,props.dright) + CM = reshape(copy(C),props.dleft,props.dright) else if Ctrans(props) - cref = reshape(C,props.dleft,props.dright) + CM = reshape(C,props.dleft,props.dright) if tA=='N' && tB=='N' - (aref,bref) = (bref,aref) + (AM,BM) = (BM,AM) tA = tB = 'T' elseif tA=='T' && tB=='T' - (aref,bref) = (bref,aref) + (AM,BM) = (BM,AM) tA = tB = 'N' end else - cref = reshape(C,props.dleft,props.dright) + CM = reshape(C,props.dleft,props.dright) 
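+      # (The transposed-C branch above swaps AM and BM and flips the
+      # transpose flags, using transpose(A*B) == transpose(B)*transpose(A),
+      # so that gemm! can write its result directly into the transposed
+      # layout of C without an extra permutation.)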
end end - #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),aref,bref,promote_type(T,Tβ)(β),cref) + #BLAS.gemm!(tA,tB,promote_type(T,Tα)(α),AM,BM,promote_type(T,Tβ)(β),CM) # TODO: make sure this is fast with Tensor{ElT,2}, or - # convert aref and bref to Matrix - BLAS.gemm!(tA,tB,one(El),aref,bref,zero(El),cref) + # convert AM and BM to Matrix + BLAS.gemm!(tA,tB,one(El),AM,BM,zero(El),CM) if props.permuteC - permutedims!(C,reshape(cref,props.newCrange...),NTuple{NC,Int}(props.PC)) + permutedims!(C,reshape(CM,props.newCrange...), + NTuple{NC,Int}(props.PC)) end return C end @@ -454,9 +465,9 @@ function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; fastSVD::Bool = get(kwargs,:fastSVD,false) if fastSVD - MU,MS,MV = svd(Matrix(T)) + MU,MS,MV = svd(matrix(T)) else - MU,MS,MV = recursiveSVD(Matrix(T)) + MU,MS,MV = recursiveSVD(matrix(T)) end conj!(MV) @@ -509,7 +520,7 @@ function eigenHermitian(T::DenseTensor{ElT,2,IndsT}; absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - DM,UM = eigen(Hermitian(Matrix(T))) + DM,UM = eigen(Hermitian(matrix(T))) # Sort by largest to smallest eigenvalues p = sortperm(DM; rev = true) @@ -563,14 +574,14 @@ function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} # TODO: just call qr on T directly (make sure # that is fast) - QM,RM = qr(Matrix(T)) + QM,RM = qr(matrix(T)) # Make the new indices to go onto Q and R q,r = inds(T) q = dim(q) < dim(r) ? sim(q) : sim(r) Qinds = IndsT((ind(T,1),q)) Rinds = IndsT((q,ind(T,2))) Q = Tensor(Dense{ElT}(vec(Matrix(QM))),Qinds) - R = Tensor(Dense{ElT}(vec(Matrix(RM))),Rinds) + R = Tensor(Dense{ElT}(vec(RM)),Rinds) return Q,R end @@ -600,14 +611,29 @@ end function polar(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} - QM,RM = polar(Matrix(T)) + QM,RM = polar(matrix(T)) dim = size(QM,2) # Make the new indices to go onto Q and R q = eltype(IndsT)(dim) + # TODO: use push/pushfirst instead of a constructor + # call here Qinds = IndsT((ind(T,1),q)) Rinds = IndsT((q,ind(T,2))) - Q = Tensor(Dense{ElT}(vec(Matrix(QM))),Qinds) - R = Tensor(Dense{ElT}(vec(Matrix(RM))),Rinds) + Q = Tensor(Dense{ElT}(vec(QM)),Qinds) + R = Tensor(Dense{ElT}(vec(RM)),Rinds) return Q,R end +function LinearAlgebra.exp(T::DenseTensor{ElT,N,IndsT}, + Lpos::NTuple{NL,Int}, + Rpos::NTuple{NR,Int}; + ishermitian::Bool=false) where {ElT,N,IndsT,NL,NR} + M = permute_reshape(T,Lpos,Rpos) + if ishermitian + expM = exp(Hermitian(matrix(M))) + else + expM = exp(matrix(M)) + end + return Tensor(Dense{ElT}(vec(expM)),permute(inds(T),(Lpos...,Rpos...))) +end + diff --git a/src/tensor/tensor.jl b/src/tensor/tensor.jl index 9ca029a50a..349217c80a 100644 --- a/src/tensor/tensor.jl +++ b/src/tensor/tensor.jl @@ -11,8 +11,9 @@ interface and no assumption of labels) struct Tensor{ElT,N,StoreT,IndsT} <: AbstractArray{ElT,N} store::StoreT inds::IndsT + # The resulting Tensor is a view into the input data function Tensor(store::StoreT,inds::IndsT) where {StoreT,IndsT} - new{eltype(StoreT),length(inds),StoreT,IndsT}(store,inds) + new{eltype(StoreT),length(IndsT),StoreT,IndsT}(store,inds) end end @@ -29,23 +30,29 @@ ind(T::Tensor,j::Integer) = inds(T)[j] dims(ds::Dims) = ds dim(ds::Dims) = prod(ds) +Base.length(ds::Type{<:Dims{N}}) where {N} = N + # This may be a bad idea to overload? # Type piracy? Base.strides(is::Dims) = Base.size_to_strides(1, dims(is)...) 
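# (Base.size_to_strides builds the column-major strides as cumulative
# products of the dimensions, e.g. Base.size_to_strides(1, 2, 3, 4)
# returns (1, 2, 6) for an array of dims (2, 3, 4).)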
Base.copy(ds::Dims) = ds -Base.promote_rule(::Type{<:Dims},::Type{Val{N}}) where {N} = Dims{N} +## TODO: should this be StaticArrays.similar_type? +#Base.promote_rule(::Type{<:Dims}, +# ::Type{Val{N}}) where {N} = Dims{N} ValLength(::Type{Dims{N}}) where {N} = Val{N} -ValLength(::Dims{N}) where {N} = Val(N) +ValLength(::Dims{N}) where {N} = Val{N}() # This is to help with some generic programming in the Tensor # code (it helps to construct a Tuple(::NTuple{N,Int}) where the # only known thing for dispatch is a concrete type such # as Dims{4}) -StaticArrays.similar_type(::Type{IndsT},::Val{N}) where {IndsT<:Dims,N} = Dims{N} +StaticArrays.similar_type(::Type{<:Dims}, + ::Type{Val{N}}) where {N} = Dims{N} -unioninds(is1::Dims{N1},is2::Dims{N2}) where {N1,N2} = Dims{N1+N2}((is1...,is2...)) +unioninds(is1::Dims{N1}, + is2::Dims{N2}) where {N1,N2} = Dims{N1+N2}((is1...,is2...)) function deleteat(t::NTuple{N},pos::Int) where {N} return ntuple(i -> i < pos ? t[i] : t[i+1],Val(N-1)) diff --git a/test/test_contract.jl b/test/test_contract.jl index bd7d456439..c7da3d33a2 100644 --- a/test/test_contract.jl +++ b/test/test_contract.jl @@ -35,15 +35,15 @@ digits(::Type{T},i,j,k) where {T} = T(i*10^2+j*10+k) end @testset "Test contract ITensor (Scalar*Vector -> Vector)" begin C = A*Ai - @test Array(C)≈scalar(A)*Array(Ai) + @test array(C)≈scalar(A)*array(Ai) end @testset "Test contract ITensor (Vector*Scalar -> Vector)" begin C = Aj*A - @test Array(C)≈scalar(A)*Array(Aj) + @test array(C)≈scalar(A)*array(Aj) end @testset "Test contract ITensors (Vectorᵀ*Vector -> Scalar)" begin C = Ai*Bi - CArray = transpose(Array(Ai))*Array(Bi) + CArray = transpose(array(Ai))*array(Bi) @test CArray≈scalar(C) end @testset "Test contract ITensors (Vector*Vectorᵀ -> Matrix)" begin @@ -55,66 +55,66 @@ digits(::Type{T},i,j,k) where {T} = T(i*10^2+j*10+k) @testset "Test contract ITensors (Matrix*Scalar -> Matrix)" begin Aij = permute(Aij,i,j) C = Aij*A - @test Array(permute(C,i,j))≈scalar(A)*Array(Aij) + @test array(permute(C,i,j))≈scalar(A)*array(Aij) end @testset "Test contract ITensors (Matrix*Vector -> Vector)" begin Aij = permute(Aij,i,j) C = Aij*Aj - CArray = Array(permute(Aij,i,j))*Array(Aj) - @test CArray≈Array(C) + CArray = array(permute(Aij,i,j))*array(Aj) + @test CArray≈array(C) end @testset "Test contract ITensors (Matrixᵀ*Vector -> Vector)" begin Aij = permute(Aij,j,i) C = Aij*Aj - CArray = transpose(Array(Aij))*Array(Aj) - @test CArray≈Array(C) + CArray = transpose(array(Aij))*array(Aj) + @test CArray≈array(C) end @testset "Test contract ITensors (Vector*Matrix -> Vector)" begin Aij = permute(Aij,i,j) C = Ai*Aij - CArray = transpose(transpose(Array(Ai))*Array(Aij)) - @test CArray≈Array(C) + CArray = transpose(transpose(array(Ai))*array(Aij)) + @test CArray≈array(C) end @testset "Test contract ITensors (Vector*Matrixᵀ -> Vector)" begin Aij = permute(Aij,j,i) C = Ai*Aij - CArray = transpose(transpose(Array(Ai))*transpose(Array(Aij))) - @test CArray≈Array(C) + CArray = transpose(transpose(array(Ai))*transpose(array(Aij))) + @test CArray≈array(C) end @testset "Test contract ITensors (Matrix*Matrix -> Scalar)" begin Aij = permute(Aij,i,j) Bij = permute(Bij,i,j) C = Aij*Bij - CArray = tr(Array(Aij)*transpose(Array(Bij))) + CArray = tr(array(Aij)*transpose(array(Bij))) @test CArray≈scalar(C) end @testset "Test contract ITensors (Matrix*Matrix -> Matrix)" begin Aij = permute(Aij,i,j) Ajk = permute(Ajk,j,k) C = Aij*Ajk - CArray = Array(Aij)*Array(Ajk) - @test CArray≈Array(C) + CArray = array(Aij)*array(Ajk) + @test 
CArray≈array(C) end @testset "Test contract ITensors (Matrixᵀ*Matrix -> Matrix)" begin Aij = permute(Aij,j,i) Ajk = permute(Ajk,j,k) C = Aij*Ajk - CArray = transpose(Array(Aij))*Array(Ajk) - @test CArray≈Array(C) + CArray = transpose(array(Aij))*array(Ajk) + @test CArray≈array(C) end @testset "Test contract ITensors (Matrix*Matrixᵀ -> Matrix)" begin Aij = permute(Aij,i,j) Ajk = permute(Ajk,k,j) C = Aij*Ajk - CArray = Array(Aij)*transpose(Array(Ajk)) - @test CArray≈Array(C) + CArray = array(Aij)*transpose(array(Ajk)) + @test CArray≈array(C) end @testset "Test contract ITensors (Matrixᵀ*Matrixᵀ -> Matrix)" begin Aij = permute(Aij,j,i) Ajk = permute(Ajk,k,j) C = Aij*Ajk - CArray = transpose(Array(Aij))*transpose(Array(Ajk)) - @test CArray≈Array(C) + CArray = transpose(array(Aij))*transpose(array(Ajk)) + @test CArray≈array(C) end @testset "Test contract ITensors (Matrix⊗Matrix -> 4-tensor)" begin C = Aij*Akl @@ -125,55 +125,55 @@ digits(::Type{T},i,j,k) where {T} = T(i*10^2+j*10+k) @testset "Test contract ITensors (3-Tensor*Scalar -> 3-Tensor)" begin Aijk = permute(Aijk,i,j,k) C = Aijk*A - @test Array(permute(C,i,j,k))==scalar(A)*Array(Aijk) + @test array(permute(C,i,j,k))==scalar(A)*array(Aijk) end @testset "Test contract ITensors (3-Tensor*Vector -> Matrix)" begin Aijk = permute(Aijk,i,j,k) C = Aijk*Ai - CArray = reshape(reshape(Array(permute(Aijk,j,k,i)),dim(j)*dim(k),dim(i))*Array(Ai),dim(j),dim(k)) - @test CArray≈Array(permute(C,j,k)) + CArray = reshape(reshape(array(permute(Aijk,j,k,i)),dim(j)*dim(k),dim(i))*array(Ai),dim(j),dim(k)) + @test CArray≈array(permute(C,j,k)) end @testset "Test contract ITensors (Vector*3-Tensor -> Matrix)" begin Aijk = permute(Aijk,i,j,k) C = Aj*Aijk - CArray = reshape(transpose(Array(Aj))*reshape(Array(permute(Aijk,j,i,k)),dim(j),dim(i)*dim(k)),dim(i),dim(k)) - @test CArray≈Array(permute(C,i,k)) + CArray = reshape(transpose(array(Aj))*reshape(array(permute(Aijk,j,i,k)),dim(j),dim(i)*dim(k)),dim(i),dim(k)) + @test CArray≈array(permute(C,i,k)) end @testset "Test contract ITensors (3-Tensor*Matrix -> Vector)" begin Aijk = permute(Aijk,i,j,k) Aik = permute(Aik,i,k) C = Aijk*Aik - CArray = reshape(Array(permute(Aijk,j,i,k)),dim(j),dim(i)*dim(k))*vec(Array(Aik)) - @test CArray≈Array(C) + CArray = reshape(array(permute(Aijk,j,i,k)),dim(j),dim(i)*dim(k))*vec(array(Aik)) + @test CArray≈array(C) end @testset "Test contract ITensors (3-Tensor*Matrix -> 3-Tensor)" begin Aijk = permute(Aijk,i,j,k) Ajl = permute(Ajl,j,l) C = Aijk*Ajl - CArray = reshape(reshape(Array(permute(Aijk,i,k,j)),dim(i)*dim(k),dim(j))*Array(Ajl),dim(i),dim(k),dim(l)) - @test CArray≈Array(permute(C,i,k,l)) + CArray = reshape(reshape(array(permute(Aijk,i,k,j)),dim(i)*dim(k),dim(j))*array(Ajl),dim(i),dim(k),dim(l)) + @test CArray≈array(permute(C,i,k,l)) end @testset "Test contract ITensors (Matrix*3-Tensor -> 3-Tensor)" begin Aijk = permute(Aijk,i,j,k) Akl = permute(Akl,k,l) C = Akl*Aijk - CArray = reshape(Array(permute(Akl,l,k))*reshape(Array(permute(Aijk,k,i,j)),dim(k),dim(i)*dim(j)),dim(l),dim(i),dim(j)) - @test CArray≈Array(permute(C,l,i,j)) + CArray = reshape(array(permute(Akl,l,k))*reshape(array(permute(Aijk,k,i,j)),dim(k),dim(i)*dim(j)),dim(l),dim(i),dim(j)) + @test CArray≈array(permute(C,l,i,j)) end @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin Aijk = permute(Aijk,i,j,k) Ajkl = permute(Ajkl,j,k,l) C = Aijk*Ajkl - CArray = reshape(Array(permute(Aijk,i,j,k)),dim(i),dim(j)*dim(k))*reshape(Array(permute(Ajkl,j,k,l)),dim(j)*dim(k),dim(l)) - @test 
CArray≈Array(permute(C,i,l)) + CArray = reshape(array(permute(Aijk,i,j,k)),dim(i),dim(j)*dim(k))*reshape(array(permute(Ajkl,j,k,l)),dim(j)*dim(k),dim(l)) + @test CArray≈array(permute(C,i,l)) end @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin for inds_ijk ∈ permutations([i,j,k]), inds_jkl ∈ permutations([j,k,l]) Aijk = permute(Aijk,inds_ijk...) Ajkl = permute(Ajkl,inds_jkl...) C = Ajkl*Aijk - CArray = reshape(Array(permute(Ajkl,l,j,k)),dim(l),dim(j)*dim(k))*reshape(Array(permute(Aijk,j,k,i)),dim(j)*dim(k),dim(i)) - @test CArray≈Array(permute(C,l,i)) + CArray = reshape(array(permute(Ajkl,l,j,k)),dim(l),dim(j)*dim(k))*reshape(array(permute(Aijk,j,k,i)),dim(j)*dim(k),dim(i)) + @test CArray≈array(permute(C,l,i)) end end @testset "Test contract ITensors (4-Tensor*3-Tensor -> 1-Tensor)" begin @@ -181,8 +181,8 @@ digits(::Type{T},i,j,k) where {T} = T(i*10^2+j*10+k) Aijkl = permute(Aijkl,inds_ijkl...) Ajkl = permute(Ajkl,inds_jkl...) C = Ajkl*Aijkl - CArray = reshape(Array(permute(Ajkl,j,k,l)),1,dim(j)*dim(k)*dim(l))*reshape(Array(permute(Aijkl,j,k,l,i)),dim(j)*dim(k)*dim(l),dim(i)) - @test vec(CArray)≈Array(permute(C,i)) + CArray = reshape(array(permute(Ajkl,j,k,l)),1,dim(j)*dim(k)*dim(l))*reshape(array(permute(Aijkl,j,k,l,i)),dim(j)*dim(k)*dim(l),dim(i)) + @test vec(CArray)≈array(permute(C,i)) end end @testset "Test contract ITensors (4-Tensor*3-Tensor -> 3-Tensor)" begin @@ -190,8 +190,8 @@ digits(::Type{T},i,j,k) where {T} = T(i*10^2+j*10+k) Aijkl = permute(Aijkl,inds_ijkl...) Aklα = permute(Aklα,inds_klα...) C = Aklα*Aijkl - CArray = reshape(reshape(Array(permute(Aklα,α,k,l)),dim(α),dim(k)*dim(l))*reshape(Array(permute(Aijkl,k,l,i,j)),dim(k)*dim(l),dim(i)*dim(j)),dim(α),dim(i),dim(j)) - @test CArray≈Array(permute(C,α,i,j)) + CArray = reshape(reshape(array(permute(Aklα,α,k,l)),dim(α),dim(k)*dim(l))*reshape(array(permute(Aijkl,k,l,i,j)),dim(k)*dim(l),dim(i)*dim(j)),dim(α),dim(i),dim(j)) + @test CArray≈array(permute(C,α,i,j)) end end end # End contraction testset @@ -226,7 +226,7 @@ end A = randomITensor(Float64,i,j) B = randomITensor(ComplexF64,j,k) C = A*B - @test Array(permute(C,i,k)) ≈ Array(A)*Array(B) + @test array(permute(C,i,k)) ≈ array(A)*array(B) end @testset "Complex ITensor * Real ITensor" begin i = Index(2,"i") @@ -235,7 +235,7 @@ end A = randomITensor(ComplexF64,i,j) B = randomITensor(Float64,j,k) C = A*B - @test Array(permute(C,i,k)) ≈ Array(A)*Array(B) + @test array(permute(C,i,k)) ≈ array(A)*array(B) end @testset "Outer Product Real ITensor * Complex ITensor" begin @@ -244,7 +244,7 @@ end A = randomITensor(Float64, i) B = randomITensor(ComplexF64, j) C = A*B - @test Array(permute(C, i, j)) ≈ kron(Array(A), transpose(Array(B))) + @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B))) end @testset "Outer Product: Complex ITensor * Real ITensor" begin @@ -253,7 +253,7 @@ end A = randomITensor(ComplexF64, i) B = randomITensor(Float64, j) C = A*B - @test Array(permute(C, i, j)) ≈ kron(Array(A), transpose(Array(B))) + @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B))) end diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl index a104a24f50..3acd327bb5 100644 --- a/test/test_itensor_dense.jl +++ b/test/test_itensor_dense.jl @@ -53,9 +53,9 @@ digits(::Type{T},x...) 
where {T} = T(sum([x[length(x)-k+1]*10^(k-1) for k=1:leng A = ITensor(M,i,j) @test store(A) isa Dense{Float64} - @test M ≈ Matrix(A,i,j) - @test M' ≈ Matrix(A,j,i) - @test_throws DimensionMismatch Vector(A) + @test M ≈ matrix(A,i,j) + @test M' ≈ matrix(A,j,i) + @test_throws DimensionMismatch vector(A) @test size(A,1) == size(M,1) == 2 @test size(A,3) == size(M,3) == 1 @@ -69,30 +69,30 @@ digits(::Type{T},x...) where {T} = T(sum([x[length(x)-k+1]*10^(k-1) for k=1:leng @testset "To Matrix" begin TM = randomITensor(i,j) - M1 = Matrix(TM) + M1 = matrix(TM) for ni in i, nj in j @test M1[ni,nj] ≈ TM[i(ni),j(nj)] end - M2 = Matrix(TM,j,i) + M2 = matrix(TM,j,i) for ni in i, nj in j @test M2[nj,ni] ≈ TM[i(ni),j(nj)] end T3 = randomITensor(i,j,k) - @test_throws DimensionMismatch Matrix(T3,i,j) + @test_throws DimensionMismatch matrix(T3,i,j) end @testset "To Vector" begin TV = randomITensor(i) - V = Vector(TV) + V = vector(TV) for ni in i @test V[ni] ≈ TV[i(ni)] end T2 = randomITensor(i,j) - @test_throws DimensionMismatch Vector(T2) + @test_throws DimensionMismatch vector(T2) end @testset "Complex" begin @@ -181,32 +181,27 @@ end i1 = Index(2,"i1") i2 = Index(2,"i2") Amat = rand(2,2,2,2) - A = ITensor(Amat, i1,i2,s1,s2) + A = ITensor(Amat,i1,i2,s1,s2) - Aexp = exp(A,IndexSet(i1,i2)) - Amatexp = reshape( exp(reshape(Amat,4,4)), 2,2,2,2) - Aexp_from_mat = ITensor(Amatexp, i1,i2,s1,s2) + Aexp = exp(A,(i1,i2),(s1,s2)) + Amatexp = reshape(exp(reshape(Amat,4,4)),2,2,2,2) + Aexp_from_mat = ITensor(Amatexp,i1,i2,s1,s2) @test Aexp ≈ Aexp_from_mat #test that exponentiation works when indices need to be permuted - Aexp = exp(A,IndexSet(s1,s2)) - Amatexp = Array( exp( reshape(Amat,4,4))' ) - Aexp_from_mat = ITensor(reshape(Amatexp,2,2,2,2), s1,s2,i1,i2) + Aexp = exp(A,(s1,s2),(i1,i2)) + Amatexp = Matrix(exp(reshape(Amat,4,4))') + Aexp_from_mat = ITensor(reshape(Amatexp,2,2,2,2),s1,s2,i1,i2) @test Aexp ≈ Aexp_from_mat #test exponentiation when hermitian=true is used Amat = reshape(Amat, 4,4) - Amat = reshape( Amat + Amat' + randn(4,4)*1e-10 , 2,2,2,2) - A = ITensor(Amat, i1,i2,s1,s2) - Aexp = exp(A,IndexSet(i1,i2), hermitian=true) - Amatexp = Array(reshape( exp(Hermitian(reshape(Amat,4,4))), 2,2,2,2)) - Aexp_from_mat = ITensor(Amatexp, i1,i2,s1,s2) + Amat = reshape(Amat+Amat'+randn(4,4)*1e-10,2,2,2,2) + A = ITensor(Amat,i1,i2,s1,s2) + Aexp = exp(A,(i1,i2),(s1,s2),ishermitian=true) + Amatexp = reshape(Matrix(exp(Hermitian(reshape(Amat,4,4)))),2,2,2,2) + Aexp_from_mat = ITensor(Amatexp,i1,i2,s1,s2) @test Aexp ≈ Aexp_from_mat - - - - @test_throws DimensionMismatch exp(A,IndexSet(s1)) - end @@ -469,7 +464,7 @@ end for ii ∈ 1:dim(i), jj ∈ 1:dim(j), kk ∈ 1:dim(k) @test C[i(ii),j(jj),k(kk)]==A[j(jj),i(ii),k(kk)]+B[i(ii),k(kk),j(jj)] end - @test Array(permute(C,i,j,k))==Array(permute(A,i,j,k))+Array(permute(B,i,j,k)) + @test array(permute(C,i,j,k))==array(permute(A,i,j,k))+array(permute(B,i,j,k)) end @testset "Test factorizations of an ITensor" begin @@ -489,7 +484,7 @@ end jj = Index(4) T = randomITensor(ComplexF64,ii,jj) U,S,V = svd(T,ii;maxdim=2) - u,s,v = svd(Matrix(T)) + u,s,v = svd(matrix(T)) @test norm(U*S*V-T)≈sqrt(s[3]^2+s[4]^2) end diff --git a/test/test_mpo.jl b/test/test_mpo.jl index 6af413b3dc..e34e493352 100644 --- a/test/test_mpo.jl +++ b/test/test_mpo.jl @@ -48,9 +48,9 @@ include("util.jl") # Do contraction manually. O = 1. 
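    # (A note on why this works: the link dimensions here are trivial,
    # so each site tensor reshapes to a bare vector or matrix and
    # <phi|K|psi> reduces to a product of per-site scalars
    # phij' * transpose(Kj) * psij, accumulated in O below.)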
for j ∈ eachindex(phi) - psij = reshape(Array(psi[j]),2) - phij = reshape(Array(phi[j]),2) - Kj = reshape(Array(K[j]),2,2) + psij = reshape(array(psi[j]),2) + phij = reshape(array(phi[j]),2) + Kj = reshape(array(K[j]),2,2) O *= (phij'*transpose(Kj)*psij)[] end @test O ≈ inner(phi,K,psi) @@ -61,9 +61,9 @@ include("util.jl") # make bigger random MPO... for link_dim in 2:5 - mpo_tensors = [ITensor() for ii in 1:N] - mps_tensors = [ITensor() for ii in 1:N] - mps_tensors2 = [ITensor() for ii in 1:N] + mpo_tensors = ITensor[ITensor() for ii in 1:N] + mps_tensors = ITensor[ITensor() for ii in 1:N] + mps_tensors2 = ITensor[ITensor() for ii in 1:N] mpo_link_inds = [Index(link_dim, "r$ii,Link") for ii in 1:N-1] mps_link_inds = [Index(link_dim, "r$ii,Link") for ii in 1:N-1] mpo_tensors[1] = randomITensor(mpo_link_inds[1], sites[1], sites[1]') @@ -120,10 +120,10 @@ include("util.jl") ## Do contraction manually. #O = 1. #for j ∈ eachindex(phi) - # psij = reshape(Array(psi[j]),2) - # phij = reshape(Array(phi[j]),2) - # Kj = reshape(Array(K[j]),2,2) - # Jj = reshape(Array(J[j]),2,2) + # psij = reshape(array(psi[j]),2) + # phij = reshape(array(phi[j]),2) + # Kj = reshape(array(K[j]),2,2) + # Jj = reshape(array(J[j]),2,2) # O *= ((transpose(Jj)*phij)'*transpose(Kj)*psij)[] #end #@test O ≈ inner(J,phi,K,psi) @@ -167,9 +167,9 @@ include("util.jl") # make bigger random MPO... for link_dim in 2:5 - mpo_tensors = [ITensor() for ii in 1:N] - mps_tensors = [ITensor() for ii in 1:N] - mps_tensors2 = [ITensor() for ii in 1:N] + mpo_tensors = ITensor[ITensor() for ii in 1:N] + mps_tensors = ITensor[ITensor() for ii in 1:N] + mps_tensors2 = ITensor[ITensor() for ii in 1:N] mpo_link_inds = [Index(link_dim, "r$ii,Link") for ii in 1:N-1] mps_link_inds = [Index(link_dim, "r$ii,Link") for ii in 1:N-1] mpo_tensors[1] = randomITensor(mpo_link_inds[1], sites[1], sites[1]') diff --git a/test/test_phys_sitesets.jl b/test/test_phys_sitesets.jl index 90f066adff..15af82eb99 100644 --- a/test/test_phys_sitesets.jl +++ b/test/test_phys_sitesets.jl @@ -22,16 +22,16 @@ using ITensors, @test hasinds(Sz5,s[5]',s[5]) @test_throws ArgumentError op(s, "Fake", 2) - @test Array(op(s,"S+",3),s[3]',s[3]) ≈ [ 0.0 1.0; 0.0 0.0] - @test Array(op(s,"S-",4),s[4]',s[4]) ≈ [ 0.0 0.0; 1.0 0.0] - @test Array(op(s,"Sx",2),s[2]',s[2]) ≈ [ 0.0 0.5; 0.5 0.0] - @test Array(op(s,"iSy",2),s[2]',s[2]) ≈ [ 0.0 0.5;-0.5 0.0] - @test Array(op(s,"Sy",2),s[2]',s[2]) ≈ [0.0 -0.5im; 0.5im 0.0] - @test Array(op(s,"Sz",2),s[2]',s[2]) ≈ [ 0.5 0.0; 0.0 -0.5] - @test Array(op(s,"projUp",2),s[2]',s[2]) ≈ [ 1.0 0.0; 0.0 0.0] - @test Array(op(s,"projDn",2),s[2]',s[2]) ≈ [ 0.0 0.0; 0.0 1.0] - @test Array(op(s,"Up",2),s[2]) ≈ [1.0,0.0] - @test Array(op(s,"Dn",2),s[2]) ≈ [0.0,1.0] + @test array(op(s,"S+",3),s[3]',s[3]) ≈ [ 0.0 1.0; 0.0 0.0] + @test array(op(s,"S-",4),s[4]',s[4]) ≈ [ 0.0 0.0; 1.0 0.0] + @test array(op(s,"Sx",2),s[2]',s[2]) ≈ [ 0.0 0.5; 0.5 0.0] + @test array(op(s,"iSy",2),s[2]',s[2]) ≈ [ 0.0 0.5;-0.5 0.0] + @test array(op(s,"Sy",2),s[2]',s[2]) ≈ [0.0 -0.5im; 0.5im 0.0] + @test array(op(s,"Sz",2),s[2]',s[2]) ≈ [ 0.5 0.0; 0.0 -0.5] + @test array(op(s,"projUp",2),s[2]',s[2]) ≈ [ 1.0 0.0; 0.0 0.0] + @test array(op(s,"projDn",2),s[2]',s[2]) ≈ [ 0.0 0.0; 0.0 1.0] + @test array(op(s,"Up",2),s[2]) ≈ [1.0,0.0] + @test array(op(s,"Dn",2),s[2]) ≈ [0.0,1.0] end @testset "Spin One Site" begin @@ -50,21 +50,21 @@ using ITensors, @test hasinds(Sz5,s[5]',s[5]) @test_throws ArgumentError op(s, "Fake", 2) - @test Array(op(s,"S+",3),s[3]',s[3]) ≈ [ 0 √2 0; 0 0 √2; 0 0 0] - @test 
Array(op(s,"S-",3),s[3]',s[3]) ≈ [ 0 0 0; √2 0 0; 0.0 √2 0] - @test Array(op(s,"Sx",3),s[3]',s[3]) ≈ [ 0 1/√2 0; 1/√2 0 1/√2; 0 1/√2 0] - @test Array(op(s,"iSy",3),s[3]',s[3]) ≈ [ 0 1/√2 0; -1/√2 0 1/√2; 0 -1/√2 0] - @test Array(op(s,"Sy",3),s[3]',s[3]) ≈ [ 0 -1/√2im 0; +1/√2im 0 -1/√2im; 0 +1/√2im 0] - @test Array(op(s,"Sz",2),s[2]',s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0] - @test Array(op(s,"Sz2",2),s[2]',s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 +1.0] - @test Array(op(s,"Sx2",2),s[2]',s[2]) ≈ [0.5 0 0.5;0 1.0 0;0.5 0 0.5] - @test Array(op(s,"Sy2",2),s[2]',s[2]) ≈ [0.5 0 -0.5;0 1.0 0;-0.5 0 0.5] - @test Array(op(s,"projUp",2),s[2]',s[2]) ≈ [1.0 0 0;0 0 0;0 0 0] - @test Array(op(s,"projZ0",2),s[2]',s[2]) ≈ [0 0 0;0 1.0 0;0 0 0] - @test Array(op(s,"projDn",2),s[2]',s[2]) ≈ [0 0 0;0 0 0;0 0 1.0] - @test Array(op(s,"XUp",2),s[2]) ≈ [0.5,im*√2,0.5] - @test Array(op(s,"XZ0",2),s[2]) ≈ [im*√2,0,-im*√2] - @test Array(op(s,"XDn",2),s[2]) ≈ [0.5,-im*√2,0.5] + @test array(op(s,"S+",3),s[3]',s[3]) ≈ [ 0 √2 0; 0 0 √2; 0 0 0] + @test array(op(s,"S-",3),s[3]',s[3]) ≈ [ 0 0 0; √2 0 0; 0.0 √2 0] + @test array(op(s,"Sx",3),s[3]',s[3]) ≈ [ 0 1/√2 0; 1/√2 0 1/√2; 0 1/√2 0] + @test array(op(s,"iSy",3),s[3]',s[3]) ≈ [ 0 1/√2 0; -1/√2 0 1/√2; 0 -1/√2 0] + @test array(op(s,"Sy",3),s[3]',s[3]) ≈ [ 0 -1/√2im 0; +1/√2im 0 -1/√2im; 0 +1/√2im 0] + @test array(op(s,"Sz",2),s[2]',s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0] + @test array(op(s,"Sz2",2),s[2]',s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 +1.0] + @test array(op(s,"Sx2",2),s[2]',s[2]) ≈ [0.5 0 0.5;0 1.0 0;0.5 0 0.5] + @test array(op(s,"Sy2",2),s[2]',s[2]) ≈ [0.5 0 -0.5;0 1.0 0;-0.5 0 0.5] + @test array(op(s,"projUp",2),s[2]',s[2]) ≈ [1.0 0 0;0 0 0;0 0 0] + @test array(op(s,"projZ0",2),s[2]',s[2]) ≈ [0 0 0;0 1.0 0;0 0 0] + @test array(op(s,"projDn",2),s[2]',s[2]) ≈ [0 0 0;0 0 0;0 0 1.0] + @test array(op(s,"XUp",2),s[2]) ≈ [0.5,im*√2,0.5] + @test array(op(s,"XZ0",2),s[2]) ≈ [im*√2,0,-im*√2] + @test array(op(s,"XDn",2),s[2]) ≈ [0.5,-im*√2,0.5] end @testset "Electron Site" begin @@ -83,41 +83,41 @@ using ITensors, @test hasinds(Nup5,s[5]',s[5]) @test_throws ArgumentError op(s, "Fake", 2) - Nup3 = Array(op(s,"Nup",3),s[3]',s[3]) + Nup3 = array(op(s,"Nup",3),s[3]',s[3]) @test Nup3 ≈ [0. 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 1] - Ndn3 = Array(op(s,"Ndn",3),s[3]',s[3]) + Ndn3 = array(op(s,"Ndn",3),s[3]',s[3]) @test Ndn3 ≈ [0. 0 0 0; 0 0 0 0; 0 0 1 0; 0 0 0 1] - Ntot3 = Array(op(s,"Ntot",3),s[3]',s[3]) + Ntot3 = array(op(s,"Ntot",3),s[3]',s[3]) @test Ntot3 ≈ [0. 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 2] - Cup3 = Array(op(s,"Cup",3),s[3]',s[3]) + Cup3 = array(op(s,"Cup",3),s[3]',s[3]) @test Cup3 ≈ [0. 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0] - Cdagup3 = Array(op(s,"Cdagup",3),s[3]',s[3]) + Cdagup3 = array(op(s,"Cdagup",3),s[3]',s[3]) @test Cdagup3 ≈ [0. 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0] - Cdn3 = Array(op(s,"Cdn",3),s[3]',s[3]) + Cdn3 = array(op(s,"Cdn",3),s[3]',s[3]) @test Cdn3 ≈ [0. 0 1 0; 0 0 0 1; 0 0 0 0; 0 0 0 0] - Cdagdn3 = Array(op(s,"Cdagdn",3),s[3]',s[3]) + Cdagdn3 = array(op(s,"Cdagdn",3),s[3]',s[3]) @test Cdagdn3 ≈ [0. 0 0 0; 0 0 0 0; 1 0 0 0; 0 1 0 0] - F3 = Array(op(s,"F",3),s[3]',s[3]) + F3 = array(op(s,"F",3),s[3]',s[3]) @test F3 ≈ [1. 0 0 0; 0 -1 0 0; 0 0 -1 0; 0 0 0 1] - Fup3 = Array(op(s,"Fup",3),s[3]',s[3]) + Fup3 = array(op(s,"Fup",3),s[3]',s[3]) @test Fup3 ≈ [1. 0 0 0; 0 -1 0 0; 0 0 1 0; 0 0 0 -1] - Fdn3 = Array(op(s,"Fdn",3),s[3]',s[3]) + Fdn3 = array(op(s,"Fdn",3),s[3]',s[3]) @test Fdn3 ≈ [1. 
0 0 0; 0 1 0 0; 0 0 -1 0; 0 0 0 -1] - Sz3 = Array(op(s,"Sz",3),s[3]',s[3]) + Sz3 = array(op(s,"Sz",3),s[3]',s[3]) @test Sz3 ≈ [0. 0 0 0; 0 0.5 0 0; 0 0 -0.5 0; 0 0 0 0] - Sx3 = Array(op(s,"Sx",3),s[3]',s[3]) + Sx3 = array(op(s,"Sx",3),s[3]',s[3]) @test Sx3 ≈ [0. 0 0 0; 0 0 0.5 0; 0 0.5 0 0; 0 0 0 0] - Sp3 = Array(op(s,"S+",3),s[3]',s[3]) + Sp3 = array(op(s,"S+",3),s[3]',s[3]) @test Sp3 ≈ [0. 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0] - Sm3 = Array(op(s,"S-",3),s[3]',s[3]) + Sm3 = array(op(s,"S-",3),s[3]',s[3]) @test Sm3 ≈ [0. 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0] - Sem = Array(op(s,"Emp",3),s[3]) + Sem = array(op(s,"Emp",3),s[3]) @test Sem ≈ [1.0; 0.0; 0.0; 0.0] - Sup = Array(op(s,"Up",3),s[3]) + Sup = array(op(s,"Up",3),s[3]) @test Sup ≈ [0.0; 1.0; 0.0; 0.0] - Sdn = Array(op(s,"Dn",3),s[3]) + Sdn = array(op(s,"Dn",3),s[3]) @test Sdn ≈ [0.0; 0.0; 1.0; 0.0] - Supdn = Array(op(s,"UpDn",3),s[3]) + Supdn = array(op(s,"UpDn",3),s[3]) @test Supdn ≈ [0.0; 0.0; 0.0; 1.0] end @@ -141,33 +141,33 @@ using ITensors, Ntot_2 = op(s,"Ntot",2) @test Ntot_2[2,2] ≈ 1.0 @test Ntot_2[3,3] ≈ 1.0 - Cup3 = Array(op(s,"Cup",3),s[3]',s[3]) + Cup3 = array(op(s,"Cup",3),s[3]',s[3]) @test Cup3 ≈ [0. 1 0; 0 0 0; 0 0 0] - Cdup3 = Array(op(s,"Cdagup",3),s[3]',s[3]) + Cdup3 = array(op(s,"Cdagup",3),s[3]',s[3]) @test Cdup3 ≈ [0 0 0; 1. 0 0; 0 0 0] - Cdn3 = Array(op(s,"Cdn",3),s[3]',s[3]) + Cdn3 = array(op(s,"Cdn",3),s[3]',s[3]) @test Cdn3 ≈ [0. 0. 1; 0 0 0; 0 0 0] - Cddn3 = Array(op(s,"Cdagdn",3),s[3]',s[3]) + Cddn3 = array(op(s,"Cdagdn",3),s[3]',s[3]) @test Cddn3 ≈ [0 0 0; 0. 0 0; 1 0 0] - FP3 = Array(op(s,"FP",3),s[3]',s[3]) + FP3 = array(op(s,"FP",3),s[3]',s[3]) @test FP3 ≈ [1.0 0. 0; 0 -1.0 0; 0 0 -1.0] - Fup3 = Array(op(s,"Fup",3),s[3]',s[3]) + Fup3 = array(op(s,"Fup",3),s[3]',s[3]) @test Fup3 ≈ [1.0 0. 0; 0 -1.0 0; 0 0 1.0] - Fdn3 = Array(op(s,"Fdn",3),s[3]',s[3]) + Fdn3 = array(op(s,"Fdn",3),s[3]',s[3]) @test Fdn3 ≈ [1.0 0. 0; 0 1.0 0; 0 0 -1.0] - Sz3 = Array(op(s,"Sz",3),s[3]',s[3]) + Sz3 = array(op(s,"Sz",3),s[3]',s[3]) @test Sz3 ≈ [0.0 0. 0; 0 0.5 0; 0 0 -0.5] - Sx3 = Array(op(s,"Sx",3),s[3]',s[3]) + Sx3 = array(op(s,"Sx",3),s[3]',s[3]) @test Sx3 ≈ [0.0 0. 0; 0 0 1; 0 1 0] - Sp3 = Array(op(s,"Splus",3),s[3]',s[3]) + Sp3 = array(op(s,"Splus",3),s[3]',s[3]) @test Sp3 ≈ [0.0 0. 0; 0 0 1.0; 0 0 0] - Sm3 = Array(op(s,"Sminus",3),s[3]',s[3]) + Sm3 = array(op(s,"Sminus",3),s[3]',s[3]) @test Sm3 ≈ [0.0 0. 0; 0 0 0; 0 1.0 0] - Up3 = Array(op(s,"Up",3),s[3]) + Up3 = array(op(s,"Up",3),s[3]) @test Up3 ≈ [0.0; 1.0; 0] - Dn3 = Array(op(s,"Dn",3),s[3]) + Dn3 = array(op(s,"Dn",3),s[3]) @test Dn3 ≈ [0.0; 0.0; 1.0] - Em3 = Array(op(s,"Emp",3),s[3]) + Em3 = array(op(s,"Emp",3),s[3]) @test Em3 ≈ [1.0; 0.0; 0.0] end end From e197ad3311328ae276b3f6893213ea2c9c05b626 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Tue, 1 Oct 2019 15:31:11 -0400 Subject: [PATCH 19/27] Speed up contraction a bit. 
Fix bug in eigenHermitian (wasn't truncating) --- Manifest.toml | 73 +++++++++++++++++ Project.toml | 5 +- src/ITensors.jl | 5 +- src/decomp.jl | 6 +- src/indexset.jl | 27 ++++--- src/mps/mpo.jl | 8 +- src/tensor/contraction_logic.jl | 65 ++++++++------- src/tensor/dense.jl | 135 ++++--------------------------- src/tensor/diag.jl | 8 +- src/tensor/linearalgebra.jl | 139 ++++++++++++++++++++++++++++++++ test/test_ctmrg.jl | 1 + test/test_itensor_dense.jl | 3 +- 12 files changed, 299 insertions(+), 176 deletions(-) create mode 100644 Manifest.toml create mode 100644 src/tensor/linearalgebra.jl diff --git a/Manifest.toml b/Manifest.toml new file mode 100644 index 0000000000..71b5c72686 --- /dev/null +++ b/Manifest.toml @@ -0,0 +1,73 @@ +# This file is machine-generated - editing it directly is not advised + +[[Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[Crayons]] +deps = ["Test"] +git-tree-sha1 = "f621b8ef51fd2004c7cf157ea47f027fdeac5523" +uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" +version = "4.0.0" + +[[Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + +[[InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[LinearAlgebra]] +deps = ["Libdl"] +uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" + +[[Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[Random]] +deps = ["Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[StaticArrays]] +deps = ["LinearAlgebra", "Random", "Statistics"] +git-tree-sha1 = "db23bbf50064c582b6f2b9b043c8e7e98ea8c0c6" +uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "0.11.0" + +[[Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + +[[Test]] +deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[TimerOutputs]] +deps = ["Crayons", "Printf", "Test", "Unicode"] +git-tree-sha1 = "b80671c06f8f8bae08c55d67b5ce292c5ae2660c" +uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" +version = "0.5.0" + +[[Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" diff --git a/Project.toml b/Project.toml index 9e3b61a417..0c174ad8d3 100644 --- a/Project.toml +++ b/Project.toml @@ -8,14 +8,15 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" +TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" [compat] julia = "1" [extras] Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["Test","Combinatorics","QuadGK"] +test = ["Test", "Combinatorics", "QuadGK"] diff --git a/src/ITensors.jl b/src/ITensors.jl index f4fb4dbf60..d5d476b212 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -3,7 +3,8 @@ module ITensors using Random, Printf, LinearAlgebra, - StaticArrays # For 
SmallString + StaticArrays, + TimerOutputs # TODO: move imports to individual files import Base.adjoint, @@ -51,6 +52,7 @@ import Base.adjoint, # Global Variables # const warnTensorOrder = 10 +const to = TimerOutput() ##################################### # Index and IndexSet @@ -68,6 +70,7 @@ include("tensor/tensor.jl") include("tensor/tensorstorage.jl") include("tensor/contraction_logic.jl") include("tensor/dense.jl") +include("tensor/linearalgebra.jl") include("tensor/diag.jl") include("tensor/combiner.jl") include("tensor/truncate.jl") diff --git a/src/decomp.jl b/src/decomp.jl index 90ae5b70e7..8dfb65b0fc 100644 --- a/src/decomp.jl +++ b/src/decomp.jl @@ -124,7 +124,8 @@ function _factorize_from_left_eigen(A::ITensor, kwargs...) Lis = commoninds(inds(A),IndexSet(Linds...)) A² = A*prime(dag(A),Lis) - FU,D = eigenHermitian(A²,Lis,prime(Lis);kwargs...) + FU,D = eigenHermitian(A²,Lis,prime(Lis); ispossemidef=true, + kwargs...) FV = dag(FU)*A return FU,FV,commonindex(FU,FV) end @@ -134,7 +135,8 @@ function _factorize_from_right_eigen(A::ITensor, kwargs...) Ris = uniqueinds(inds(A),IndexSet(Linds...)) A² = A*prime(dag(A),Ris) - FV,D = eigenHermitian(A²,Ris,prime(Ris); kwargs...) + FV,D = eigenHermitian(A²,Ris,prime(Ris); ispossemidef=true, + kwargs...) FU = A*dag(FV) return FU,FV,commonindex(FU,FV) end diff --git a/src/indexset.jl b/src/indexset.jl index e8391b1e61..44a48b74d2 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -515,33 +515,34 @@ end # Move this to tensor, since this logic is different # for contracting different kinds of storage +# Also, generalize this to not just use IndexSet function contract_inds(Ais::IndexSet{N1}, - Aind::NTuple{N1,Int}, + Alabel::NTuple{N1,Int}, Bis::IndexSet{N2}, - Bind::NTuple{N2,Int}) where {N1,N2} + Blabel::NTuple{N2,Int}) where {N1,N2} ncont = 0 - for i in Aind - if(i < 0) ncont += 1 end + for i in Alabel + i < 0 && (ncont += 1) end NR = N1+N2-2*ncont - Cind = MVector{NR,Int}(ntuple(_->0,Val(NR))) #zeros(Int,NR) - Cis = IndexSet{NR}() #fill(Index(),NR) + Clabel = Vector{Int}(undef,NR) + Cis = Vector{Index}(undef,NR) u = 1 - for i ∈ 1:length(Ais) - if(Aind[i] > 0) - Cind[u] = Aind[i]; + @inbounds for i ∈ 1:N1 + if(Alabel[i] > 0) + Clabel[u] = Alabel[i]; Cis[u] = Ais[i]; u += 1 end end - for i ∈ 1:length(Bis) - if(Bind[i] > 0) - Cind[u] = Bind[i]; + @inbounds for i ∈ 1:N2 + if(Blabel[i] > 0) + Clabel[u] = Blabel[i]; Cis[u] = Bis[i]; u += 1 end end - return (Cis,NTuple{NR,Int}(Cind)) + return IndexSet{NR}(Cis...),NTuple{NR,Int}(Clabel) end # TODO: implement this in terms of a tuple, diff --git a/src/mps/mpo.jl b/src/mps/mpo.jl index 6b29a9170f..e318070509 100644 --- a/src/mps/mpo.jl +++ b/src/mps/mpo.jl @@ -365,7 +365,9 @@ function densityMatrixApplyMPO(A::MPO, psi::MPS; kwargs...)::MPS ts = tags(commonindex(psi[n], psi[n-1])) Lis = commonindex(ρ, A[n]) Ris = uniqueinds(ρ, Lis) - FU, D = eigenHermitian(ρ, Lis, Ris; tags=ts, kwargs...) + FU, D = eigenHermitian(ρ, Lis, Ris; ispossemidef=true, + tags=ts, + kwargs...) psi_out[n] = setprime(dag(FU), 0, "Site") O = O * FU * psi[n-1] * A[n-1] O = prime(O, -1, "Site") @@ -375,7 +377,9 @@ function densityMatrixApplyMPO(A::MPO, psi::MPS; kwargs...)::MPS ts = tags(commonindex(psi[j], psi[j-1])) Lis = IndexSet(commonindex(ρ, A[j]), commonindex(ρ, psi_out[j+1])) Ris = uniqueinds(ρ, Lis) - FU, D = eigenHermitian(ρ, Lis, Ris; tags=ts, kwargs...) + FU, D = eigenHermitian(ρ, Lis, Ris; ispossemidef=true, + tags=ts, + kwargs...) 
psi_out[j] = dag(FU) O = O * FU * psi[j-1] * A[j-1] O = prime(O, -1, "Site") diff --git a/src/tensor/contraction_logic.jl b/src/tensor/contraction_logic.jl index 1cc86cf81e..bedcc7979e 100644 --- a/src/tensor/contraction_logic.jl +++ b/src/tensor/contraction_logic.jl @@ -1,14 +1,14 @@ -mutable struct ContractionProperties{Na,Nb,Nc} - ai::NTuple{Na,Int} - bi::NTuple{Nb,Int} - ci::NTuple{Nc,Int} +mutable struct ContractionProperties{NA,NB,NC} + ai::NTuple{NA,Int} + bi::NTuple{NB,Int} + ci::NTuple{NC,Int} nactiveA::Int nactiveB::Int nactiveC::Int - AtoB::Vector{Int} - AtoC::Vector{Int} - BtoC::Vector{Int} + AtoB::MVector{NA,Int} + AtoC::MVector{NA,Int} + BtoC::MVector{NB,Int} permuteA::Bool permuteB::Bool permuteC::Bool @@ -27,26 +27,28 @@ mutable struct ContractionProperties{Na,Nb,Nc} newArange::Vector{Int} newBrange::Vector{Int} newCrange::Vector{Int} - function ContractionProperties(ai::NTuple{Na,Int},bi::NTuple{Nb,Int},ci::NTuple{Nc,Int}) where {Na,Nb,Nc} - new{Na,Nb,Nc}(ai,bi,ci,0,0,0,Vector{Int}(),Vector{Int}(),Vector{Int}(),false,false,false,1,1,1,0, - length(ai),length(bi),length(ai),length(bi),Vector{Int}(),Vector{Int}(),Vector{Int}(), - false,Vector{Int}(),Vector{Int}(),Vector{Int}()) + function ContractionProperties(ai::NTuple{NA,Int}, + bi::NTuple{NB,Int}, + ci::NTuple{NC,Int}) where {NA,NB,NC} + new{NA,NB,NC}(ai,bi,ci,0,0,0, + ntuple(_->0,NA),ntuple(_->0,NA),ntuple(_->0,NB), + false,false,false,1,1,1,0, + NA,NB,NA,NB, + Vector{Int}(),Vector{Int}(),Vector{Int}(), + false, + Vector{Int}(),Vector{Int}(),Vector{Int}()) end end -function compute_perms!(props::ContractionProperties) - #Use !AtoB.empty() as a check to see if we've already run this - length(props.AtoB)!=0 && return +function compute_perms!(props::ContractionProperties{NA,NB,NC}) where + {NA,NB,NC} + #length(props.AtoB)!=0 && return - na = length(props.ai) - nb = length(props.bi) - nc = length(props.ci) - - props.AtoB = fill(0,na) - props.AtoC = fill(0,na) - props.BtoC = fill(0,nb) - for i = 1:na - for j = 1:nb + #props.AtoB = fill(0,NA) + #props.AtoC = fill(0,NA) + #props.BtoC = fill(0,NB) + for i = 1:NA + for j = 1:NB if props.ai[i]==props.bi[j] props.ncont += 1 #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index) @@ -58,8 +60,8 @@ function compute_perms!(props::ContractionProperties) end end - for i = 1:na - for k = 1:nc + for i = 1:NA + for k = 1:NC if props.ai[i]==props.ci[k] #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index) i<=props.Austart && (props.Austart = i) @@ -69,8 +71,8 @@ function compute_perms!(props::ContractionProperties) end end - for j = 1:nb - for k = 1:nc + for j = 1:NB + for k = 1:NC if props.bi[j]==props.ci[k] #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index) j<=props.Bustart && (props.Bustart = j) @@ -112,15 +114,16 @@ Atrans(props::ContractionProperties) = contractedA(props,1) Btrans(props::ContractionProperties) = !contractedB(props,1) Ctrans(props::ContractionProperties) = props.ctrans -function compute_contraction_properties!(props::ContractionProperties, A, B, C) +function compute_contraction_properties!(props::ContractionProperties{NA,NB,NC}, + A,B,C) where {NA,NB,NC} compute_perms!(props) #Use props.PC.size() as a check to see if we've already run this length(props.PC)!=0 && return - ra = length(props.ai) - rb = length(props.bi) - rc = length(props.ci) + ra = NA #length(props.ai) + rb = NB #length(props.bi) + rc = NC #length(props.ci) props.PC = fill(0,rc) diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl index 
01103a026c..9ba67974be 100644 --- a/src/tensor/dense.jl +++ b/src/tensor/dense.jl @@ -372,7 +372,9 @@ function _contract!(C::DenseTensor{El,NC}, # TODO: make sure this is fast with Tensor{ElT,2}, or # convert AM and BM to Matrix - BLAS.gemm!(tA,tB,one(El),AM,BM,zero(El),CM) + BLAS.gemm!(tA,tB,one(El), + AM,BM, + zero(El),CM) if props.permuteC permutedims!(C,reshape(CM,props.newCrange...), @@ -454,50 +456,8 @@ function LinearAlgebra.svd(T::DenseTensor{<:Number,N,IndsT}, return U,S,V end -# svd of an order-2 tensor -function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; - kwargs...) where {ElT,IndsT} - maxdim::Int = get(kwargs,:maxdim,minimum(dims(T))) - mindim::Int = get(kwargs,:mindim,1) - cutoff::Float64 = get(kwargs,:cutoff,0.0) - absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) - doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - fastSVD::Bool = get(kwargs,:fastSVD,false) - - if fastSVD - MU,MS,MV = svd(matrix(T)) - else - MU,MS,MV = recursiveSVD(matrix(T)) - end - conj!(MV) - - P = MS.^2 - truncate!(P;mindim=mindim, - maxdim=maxdim, - cutoff=cutoff, - absoluteCutoff=absoluteCutoff, - doRelCutoff=doRelCutoff) - dS = length(P) - if dS < length(MS) - MU = MU[:,1:dS] - resize!(MS,dS) - MV = MV[:,1:dS] - end - - # Make the new indices to go onto U and V - u = eltype(IndsT)(dS) - v = eltype(IndsT)(dS) - Uinds = IndsT((ind(T,1),u)) - Sinds = IndsT((u,v)) - Vinds = IndsT((ind(T,2),v)) - U = Tensor(Dense{ElT}(vec(MU)),Uinds) - S = Tensor(Diag{Vector{real(ElT)}}(MS),Sinds) - V = Tensor(Dense{ElT}(vec(MV)),Vinds) - return U,S,V -end - -# eigendecomposition of an order-n tensor according to positions Lpos -# and Rpos +# eigendecomposition of an order-n tensor according to +# positions Lpos and Rpos function eigenHermitian(T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; @@ -511,47 +471,8 @@ function eigenHermitian(T::DenseTensor{<:Number,N,IndsT}, return U,D end -function eigenHermitian(T::DenseTensor{ElT,2,IndsT}; - kwargs...) where {ElT,IndsT} - truncate::Bool = get(kwargs,:truncate,false) - maxdim::Int = get(kwargs,:maxdim,minimum(dims(T))) - mindim::Int = get(kwargs,:mindim,1) - cutoff::Float64 = get(kwargs,:cutoff,0.0) - absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) - doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) - - DM,UM = eigen(Hermitian(matrix(T))) - - # Sort by largest to smallest eigenvalues - p = sortperm(DM; rev = true) - DM = DM[p] - UM = UM[:,p] - - if truncate - truncate!(DM;maxdim=maxdim, - cutoff=cutoff, - absoluteCutoff=absoluteCutoff, - doRelCutoff=doRelCutoff) - dD = length(DM) - if dD < size(UM,2) - UM = UM[:,1:dD] - end - else - dD = length(DM) - end - - # Make the new indices to go onto U and V - u = eltype(IndsT)(dD) - v = eltype(IndsT)(dD) - Uinds = IndsT((ind(T,1),u)) - Dinds = IndsT((u,v)) - U = Tensor(Dense{ElT}(vec(UM)),Uinds) - D = Tensor(Diag{Vector{real(ElT)}}(DM),Dinds) - return U,D -end - -# qr decomposition of an order-n tensor according to positions Lpos -# and Rpos +# qr decomposition of an order-n tensor according to +# positions Lpos and Rpos function LinearAlgebra.qr(T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}) where {N,IndsT,NL,NR} @@ -570,21 +491,6 @@ function LinearAlgebra.qr(T::DenseTensor{<:Number,N,IndsT}, return Q,R end -function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; - kwargs...) 
where {ElT,IndsT} - # TODO: just call qr on T directly (make sure - # that is fast) - QM,RM = qr(matrix(T)) - # Make the new indices to go onto Q and R - q,r = inds(T) - q = dim(q) < dim(r) ? sim(q) : sim(r) - Qinds = IndsT((ind(T,1),q)) - Rinds = IndsT((q,ind(T,2))) - Q = Tensor(Dense{ElT}(vec(Matrix(QM))),Qinds) - R = Tensor(Dense{ElT}(vec(RM)),Rinds) - return Q,R -end - # polar decomposition of an order-n tensor according to positions Lpos # and Rpos function polar(T::DenseTensor{<:Number,N,IndsT}, @@ -593,6 +499,7 @@ function polar(T::DenseTensor{<:Number,N,IndsT}, M = permute_reshape(T,Lpos,Rpos) UM,PM = polar(M) + # TODO: turn these into functions Linds = similar_type(IndsT,Val(NL))(ntuple(i->inds(T)[Lpos[i]],Val(NL))) Rinds = similar_type(IndsT,Val(NR))(ntuple(i->inds(T)[Rpos[i]],Val(NR))) @@ -609,31 +516,19 @@ function polar(T::DenseTensor{<:Number,N,IndsT}, return U,P end -function polar(T::DenseTensor{ElT,2,IndsT}; - kwargs...) where {ElT,IndsT} - QM,RM = polar(matrix(T)) - dim = size(QM,2) - # Make the new indices to go onto Q and R - q = eltype(IndsT)(dim) - # TODO: use push/pushfirst instead of a constructor - # call here - Qinds = IndsT((ind(T,1),q)) - Rinds = IndsT((q,ind(T,2))) - Q = Tensor(Dense{ElT}(vec(QM)),Qinds) - R = Tensor(Dense{ElT}(vec(RM)),Rinds) - return Q,R -end - -function LinearAlgebra.exp(T::DenseTensor{ElT,N,IndsT}, +function LinearAlgebra.exp(T::DenseTensor{ElT,N}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; - ishermitian::Bool=false) where {ElT,N,IndsT,NL,NR} + ishermitian::Bool=false) where {ElT,N, + NL,NR} M = permute_reshape(T,Lpos,Rpos) + indsTp = permute(inds(T),(Lpos...,Rpos...)) if ishermitian expM = exp(Hermitian(matrix(M))) + return Tensor(Dense{ElT}(vec(expM)),indsTp) else - expM = exp(matrix(M)) + expM = exp(M) + return reshape(expM,indsTp) end - return Tensor(Dense{ElT}(vec(expM)),permute(inds(T),(Lpos...,Rpos...))) end diff --git a/src/tensor/diag.jl b/src/tensor/diag.jl index 23f104842e..0e20a0df38 100644 --- a/src/tensor/diag.jl +++ b/src/tensor/diag.jl @@ -135,11 +135,11 @@ function contraction_output(TensorT1::Type{<:DiagTensor}, return zero_contraction_output(TensorT1,TensorT2,indsR) end -function Base.Array(T::DiagTensor{ElT,N}) where {ElT,N} - return Array(dense(T)) +function array(T::DiagTensor{ElT,N}) where {ElT,N} + return array(dense(T)) end -Base.Matrix(T::DiagTensor{<:Number,2}) = Array(T) -Base.Vector(T::DiagTensor{<:Number,1}) = Array(T) +matrix(T::DiagTensor{<:Number,2}) = array(T) +vector(T::DiagTensor{<:Number,1}) = array(T) diag_length(T::DiagTensor) = minimum(dims(T)) diag_length(T::DiagTensor{<:Number,0}) = 1 diff --git a/src/tensor/linearalgebra.jl b/src/tensor/linearalgebra.jl new file mode 100644 index 0000000000..e58f316a16 --- /dev/null +++ b/src/tensor/linearalgebra.jl @@ -0,0 +1,139 @@ + +# +# Linear Algebra of order 2 Tensors +# +# Even though DenseTensor{_,2} is strided +# and passable to BLAS/LAPACK, it cannot +# be made <: StridedArray + +function Base.:*(T1::Tensor{ElT1,2,StoreT1,IndsT1}, + T2::Tensor{ElT2,2,StoreT2,IndsT2}) where + {ElT1,StoreT1<:Dense,IndsT1, + ElT2,StoreT2<:Dense,IndsT2} + RM = matrix(T1)*matrix(T2) + indsR = IndsT1(ind(T1,1),ind(T2,2)) + return Tensor(Dense{promote_type(ElT1,ElT2)}(vec(RM)),indsR) +end + +function LinearAlgebra.exp(T::DenseTensor{ElT,2}) where {ElT} + expTM = exp(matrix(T)) + return Tensor(Dense{ElT}(vec(expTM)),inds(T)) +end + +function expHermitian(T::DenseTensor{ElT,2}) where {ElT} + # exp(::Hermitian/Symmetric) returns Hermitian/Symmetric, + # so extract the parent 
matrix + expTM = parent(exp(Hermitian(matrix(T)))) + return Tensor(Dense{ElT}(vec(expTM)),inds(T)) +end + +# svd of an order-2 tensor +function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; + kwargs...) where {ElT,IndsT} + maxdim::Int = get(kwargs,:maxdim,minimum(dims(T))) + mindim::Int = get(kwargs,:mindim,1) + cutoff::Float64 = get(kwargs,:cutoff,0.0) + absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) + doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) + fastSVD::Bool = get(kwargs,:fastSVD,false) + + if fastSVD + MU,MS,MV = svd(matrix(T)) + else + MU,MS,MV = recursiveSVD(matrix(T)) + end + conj!(MV) + + P = MS.^2 + truncate!(P;mindim=mindim, + maxdim=maxdim, + cutoff=cutoff, + absoluteCutoff=absoluteCutoff, + doRelCutoff=doRelCutoff) + dS = length(P) + if dS < length(MS) + MU = MU[:,1:dS] + resize!(MS,dS) + MV = MV[:,1:dS] + end + + # Make the new indices to go onto U and V + u = eltype(IndsT)(dS) + v = eltype(IndsT)(dS) + Uinds = IndsT((ind(T,1),u)) + Sinds = IndsT((u,v)) + Vinds = IndsT((ind(T,2),v)) + U = Tensor(Dense{ElT}(vec(MU)),Uinds) + S = Tensor(Diag{Vector{real(ElT)}}(MS),Sinds) + V = Tensor(Dense{ElT}(vec(MV)),Vinds) + return U,S,V +end + +function eigenHermitian(T::DenseTensor{ElT,2,IndsT}; + kwargs...) where {ElT,IndsT} + ispossemidef::Bool = get(kwargs,:ispossemidef,false) + maxdim::Int = get(kwargs,:maxdim,minimum(dims(T))) + mindim::Int = get(kwargs,:mindim,1) + cutoff::Float64 = get(kwargs,:cutoff,0.0) + absoluteCutoff::Bool = get(kwargs,:absoluteCutoff,false) + doRelCutoff::Bool = get(kwargs,:doRelCutoff,true) + + DM,UM = eigen(Hermitian(matrix(T))) + + # Sort by largest to smallest eigenvalues + p = sortperm(DM; rev = true) + DM = DM[p] + UM = UM[:,p] + + if ispossemidef + truncate!(DM;maxdim=maxdim, + cutoff=cutoff, + absoluteCutoff=absoluteCutoff, + doRelCutoff=doRelCutoff) + dD = length(DM) + if dD < size(UM,2) + UM = UM[:,1:dD] + end + else + dD = length(DM) + end + + # Make the new indices to go onto U and V + u = eltype(IndsT)(dD) + v = eltype(IndsT)(dD) + Uinds = IndsT((ind(T,1),u)) + Dinds = IndsT((u,v)) + U = Tensor(Dense{ElT}(vec(UM)),Uinds) + D = Tensor(Diag{Vector{real(ElT)}}(DM),Dinds) + return U,D +end + +function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}) where {ElT, + IndsT} + # TODO: just call qr on T directly (make sure + # that is fast) + QM,RM = qr(matrix(T)) + # Make the new indices to go onto Q and R + q,r = inds(T) + q = dim(q) < dim(r) ? 
sim(q) : sim(r) + Qinds = IndsT((ind(T,1),q)) + Rinds = IndsT((q,ind(T,2))) + Q = Tensor(Dense{ElT}(vec(Matrix(QM))),Qinds) + R = Tensor(Dense{ElT}(vec(RM)),Rinds) + return Q,R +end + +function polar(T::DenseTensor{ElT,2,IndsT}) where {ElT,IndsT} + QM,RM = polar(matrix(T)) + dim = size(QM,2) + # Make the new indices to go onto Q and R + q = eltype(IndsT)(dim) + # TODO: use push/pushfirst instead of a constructor + # call here + Qinds = IndsT((ind(T,1),q)) + Rinds = IndsT((q,ind(T,2))) + Q = Tensor(Dense{ElT}(vec(QM)),Qinds) + R = Tensor(Dense{ElT}(vec(RM)),Rinds) + return Q,R +end + diff --git a/test/test_ctmrg.jl b/test/test_ctmrg.jl index cc09c03700..fafc55c93d 100644 --- a/test/test_ctmrg.jl +++ b/test/test_ctmrg.jl @@ -26,6 +26,7 @@ function ctmrg(T::ITensor, sr = findindex(Clu⁽¹⁾,"site,right") Ud,Cdr = eigenHermitian(Clu⁽¹⁾, (ld,sd), (lr,sr); + ispossemidef=true, maxdim=χmax, lefttags="link,down,renorm", righttags="link,right,renorm", diff --git a/test/test_itensor_dense.jl b/test/test_itensor_dense.jl index 3acd327bb5..c1f252c555 100644 --- a/test/test_itensor_dense.jl +++ b/test/test_itensor_dense.jl @@ -199,7 +199,8 @@ end Amat = reshape(Amat+Amat'+randn(4,4)*1e-10,2,2,2,2) A = ITensor(Amat,i1,i2,s1,s2) Aexp = exp(A,(i1,i2),(s1,s2),ishermitian=true) - Amatexp = reshape(Matrix(exp(Hermitian(reshape(Amat,4,4)))),2,2,2,2) + Amatexp = reshape(parent(exp(Hermitian(reshape(Amat,4,4)))), + 2,2,2,2) Aexp_from_mat = ITensor(Amatexp,i1,i2,s1,s2) @test Aexp ≈ Aexp_from_mat end From db7ffef3d06af08a84ce9df41375e185608b69c4 Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Tue, 15 Oct 2019 11:57:38 -0400 Subject: [PATCH 20/27] Add benchmark executable to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 499a618326..f2de358ff4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ *.o *.swp +benchmark/mult From bb7a8e4c13dcd30a60d095c1b354079f22aa991b Mon Sep 17 00:00:00 2001 From: Matthew Fishman Date: Tue, 15 Oct 2019 15:54:08 -0400 Subject: [PATCH 21/27] Add timers in dmrg and davidson --- src/ITensors.jl | 2 +- src/iterativesolvers.jl | 36 +++++++++++++++++++++++++++--------- src/mps/dmrg.jl | 10 ++++++++++ 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/src/ITensors.jl b/src/ITensors.jl index d5d476b212..e7c367e441 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -52,7 +52,7 @@ import Base.adjoint, # Global Variables # const warnTensorOrder = 10 -const to = TimerOutput() +const GLOBAL_TIMER = TimerOutput() ##################################### # Index and IndexSet diff --git a/src/iterativesolvers.jl b/src/iterativesolvers.jl index 97b6a698c3..7bed904fca 100644 --- a/src/iterativesolvers.jl +++ b/src/iterativesolvers.jl @@ -42,6 +42,16 @@ function orthogonalize!(q::ITensor,V,ni) return end +function expand_krylov_space(M::Matrix,V,AV,ni) + newM = fill(0.0,(ni+1,ni+1)) + newM[1:ni,1:ni] = M + for k=1:ni+1 + newM[k,ni+1] = dot(V[k],AV[ni+1]) + newM[ni+1,k] = conj(newM[k,ni+1]) + end + return newM +end + function davidson(A, phi0::ITensor; kwargs...)::Tuple{Float64,ITensor} @@ -69,8 +79,12 @@ function davidson(A, error("linear size of A and dimension of phi should match in davidson") end +@timeit_debug GLOBAL_TIMER "A(q)" begin + Aphi = A(phi) +end + V = ITensor[copy(phi)] - AV = ITensor[A(phi)] + AV = ITensor[Aphi] last_lambda = NaN lambda = dot(V[1],AV[1]) @@ -93,22 +107,26 @@ function davidson(A, last_lambda = lambda +@timeit_debug GLOBAL_TIMER "orthogonalize!" 
From c7cab486150f70180a59b6eaf71edd3932032d2f Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Tue, 15 Oct 2019 16:43:59 -0400
Subject: [PATCH 22/27] Speed up permutedims! of DenseTensor

---
 src/tensor/dense.jl | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl
index 9ba67974be..b5a2ab61fb 100644
--- a/src/tensor/dense.jl
+++ b/src/tensor/dense.jl
@@ -117,12 +117,12 @@ matrix(T::DenseTensor{<:Number,2}) = array(T)
 vector(T::DenseTensor{<:Number,1}) = array(T)
 
 # TODO: call permutedims!(R,T,perm,(r,t)->t)?
-function Base.permutedims!(R::DenseTensor{<:Number,N},
-                           T::DenseTensor{<:Number,N},
-                           perm::NTuple{N,Int}) where {N}
-  permutedims!(array(R),array(T),perm)
-  return R
-end
+#function Base.permutedims!(R::DenseTensor{<:Number,N},
+#                           T::DenseTensor{<:Number,N},
+#                           perm::NTuple{N,Int}) where {N}
+#  permutedims!(array(R),array(T),perm)
+#  return R
+#end
 
 # Version that may overwrite the result or promote
 # and return the result
@@ -163,11 +163,13 @@ using Base.Cartesian: @nexprs,
 
 # Based off of the permutedims! implementation in Julia's base:
 # https://github.com/JuliaLang/julia/blob/91151ab871c7e7d6689d1cfa793c12062d37d6b6/base/multidimensional.jl#L1355
 #
-@generated function Base.permutedims!(TP::DenseTensor{<:Number,N},
-                                      T::DenseTensor{<:Number,N},
+@generated function Base.permutedims!(TTP::DenseTensor{<:Number,N},
+                                      TT::DenseTensor{<:Number,N},
                                       perm,
                                       f::Function) where {N}
   quote
+    TP = array(TTP)
+    T = array(TT)
     Base.checkdims_perm(TP, T, perm)
 
     #calculates all the strides
@@ -189,7 +191,7 @@
       ind += 1
     end)
 
-    return TP
+    return TTP
   end
 end
 function Base.permutedims!(TP::DenseTensor{<:Number,0},
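
The generated function above unrolls an N-dimensional stride walk at compile time. The same computation, written without `@generated` machinery, may clarify what the loop nest does; below is a plain-array sketch (`permutedims_sketch!` is an illustrative name, following the `size(dest) == size(src)[perm]` convention of `Base.permutedims`):

    # Permute src into dest, combining old and new elements with f.
    # f = (r,t) -> t reproduces plain permutedims!; f = (r,t) -> r + t
    # accumulates into the destination instead of overwriting it.
    function permutedims_sketch!(dest::AbstractArray{<:Number,N},
                                 src::AbstractArray{<:Number,N},
                                 perm, f) where {N}
      for I in CartesianIndices(src)
        J = CartesianIndex(ntuple(d -> I[perm[d]], Val(N)))
        dest[J] = f(dest[J], src[I])
      end
      return dest
    end

The `@generated` version exists to avoid constructing an index tuple per element and to let the compiler specialize the stride arithmetic for each N.
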
From dae3d839c6daa28bd9101664c55e312098e243 Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Tue, 15 Oct 2019 17:10:05 -0400
Subject: [PATCH 23/27] Make scale!(ITensor,Number) faster

---
 src/itensor.jl      | 2 +-
 src/tensor/dense.jl | 8 ++++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/itensor.jl b/src/itensor.jl
index 94fcc70d58..83a6d719e6 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -573,7 +573,7 @@ A .*= x
 """
 function scale!(T::ITensor,x::Number)
   TT = tensor(T)
-  TT .*= x
+  scale!(TT,x)
   return T
 end
 rmul!(T::ITensor,fac::Number) = scale!(T,fac)
diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl
index b5a2ab61fb..5e5c219248 100644
--- a/src/tensor/dense.jl
+++ b/src/tensor/dense.jl
@@ -124,6 +124,14 @@ vector(T::DenseTensor{<:Number,1}) = array(T)
 #  return R
 #end
 
+function scale!(T::DenseTensor,
+                α::Number)
+  A = array(T)
+  # This is faster than A .*= α
+  rmul!(A,α)
+  return T
+end
+
 # Version that may overwrite the result or promote
 # and return the result
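
The comment in `scale!` claims `rmul!` beats broadcasting here; that is worth verifying per machine and element type, since `rmul!` can take an optimized in-place path for strided arrays. A micro-benchmark sketch using BenchmarkTools (the matrix size and the 0.5 factor are arbitrary):

    using BenchmarkTools, LinearAlgebra

    A = randn(1000, 1000)
    # In-place scaling, as used by scale! after this patch:
    @btime rmul!(B, 0.5) setup=(B = copy($A)) evals=1
    # The broadcast form this patch replaces:
    @btime B .*= 0.5     setup=(B = copy($A)) evals=1
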
From bd6e8706757c036ad11acce8ba503b706a6bd596 Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Tue, 15 Oct 2019 17:41:04 -0400
Subject: [PATCH 24/27] Some small optimizations?

---
 src/tensor/dense.jl | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/src/tensor/dense.jl b/src/tensor/dense.jl
index 5e5c219248..23ee2c7693 100644
--- a/src/tensor/dense.jl
+++ b/src/tensor/dense.jl
@@ -117,12 +117,12 @@ matrix(T::DenseTensor{<:Number,2}) = array(T)
 vector(T::DenseTensor{<:Number,1}) = array(T)
 
 # TODO: call permutedims!(R,T,perm,(r,t)->t)?
-#function Base.permutedims!(R::DenseTensor{<:Number,N},
-#                           T::DenseTensor{<:Number,N},
-#                           perm::NTuple{N,Int}) where {N}
-#  permutedims!(array(R),array(T),perm)
-#  return R
-#end
+function Base.permutedims!(R::DenseTensor{<:Number,N},
+                           T::DenseTensor{<:Number,N},
+                           perm::NTuple{N,Int}) where {N}
+  permutedims!(array(R),array(T),perm)
+  return R
+end
 
 function scale!(T::DenseTensor,
                 α::Number)
@@ -139,7 +139,13 @@ function permutedims!!(R::Tensor,
                        T::Tensor,
                        perm::NTuple{N,Int},
                        f=(r,t)->t) where {N}
-  permutedims!(R,T,perm,f)
+  if !is_trivial_permutation(perm)
+    permutedims!(R,T,perm,f)
+  else
+    RA = array(R)
+    TA = array(T)
+    RA .= f.(RA,TA)
+  end
   return R
 end
@@ -295,6 +301,7 @@ function contract!!(R::Tensor{<:Number,NR},
   return R
 end
 
+# TODO: move to tensor.jl?
 Base.copyto!(R::Tensor,T::Tensor) = copyto!(store(R),store(T))
 
 # Move to tensor.jl? Overload this function
@@ -328,10 +335,14 @@ function _contract!(R::DenseTensor,
   return R
 end
 
-function _contract!(C::DenseTensor{El,NC},
-                    A::DenseTensor{El,NA},
-                    B::DenseTensor{El,NB},
+function _contract!(CT::DenseTensor{El,NC},
+                    AT::DenseTensor{El,NA},
+                    BT::DenseTensor{El,NB},
                     props::ContractionProperties) where {El,NC,NA,NB}
+  C = array(CT)
+  A = array(AT)
+  B = array(BT)
+
   tA = 'N'
   if props.permuteA
     AM = reshape(permutedims(A,NTuple{NA,Int}(props.PA)),props.dmid,props.dleft)
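
`is_trivial_permutation` is called above but not defined in this patch; a plausible one-line definition (the name matches the call site, the body is an assumption) is simply the identity check:

    # A permutation is trivial when every position maps to itself,
    # in which case permuting reduces to an elementwise combine.
    is_trivial_permutation(perm) = all(d -> perm[d] == d, 1:length(perm))

Skipping the permutation in that case lets `permutedims!!` fall back to a single fused broadcast over the two arrays.
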
From 3b6b9e1814b422b5d4e480931e496736b17aa24b Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Tue, 15 Oct 2019 18:25:19 -0400
Subject: [PATCH 25/27] Some hints for type inference

---
 src/itensor.jl          | 32 ++++++++++++++++----------------
 src/iterativesolvers.jl | 24 ++++++++++++------------
 2 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/src/itensor.jl b/src/itensor.jl
index 83a6d719e6..6c4427e47c 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -44,12 +44,12 @@ setstore!(T::ITensor,st::TensorStorage) = (T.store = st)
 # Dense ITensor constructors
 #
 
-ITensor(T::Tensor) = ITensor(store(T),inds(T))
-ITensor{N}(T::Tensor{<:Number,N}) where {N} = ITensor(store(T),inds(T))
+ITensor(T::Tensor{<:Number,N}) where {N} = ITensor{N}(store(T),inds(T))
+ITensor{N}(T::Tensor{<:Number,N}) where {N} = ITensor{N}(store(T),inds(T))
 
-ITensor() = ITensor(Dense{Nothing}(),IndexSet())
+ITensor() = ITensor{0}(Dense{Nothing}(),IndexSet())
 ITensor(is::IndexSet) = ITensor(Float64,is...)
-ITensor(inds::Index...) = ITensor(IndexSet(inds...))
+ITensor(inds::Vararg{Index,N}) where {N} = ITensor(IndexSet{N}(inds...))
 
 # Convert the ITensor to a Tensor that shares the same
 # data and indices as the ITensor
 tensor(A::ITensor) = Tensor(store(A),inds(A))
 
 function ITensor(::Type{T},
-                 inds::IndexSet) where {T<:Number}
-  return ITensor(Dense{float(T)}(zeros(float(T),dim(inds))),inds)
+                 inds::IndexSet{N}) where {T<:Number,N}
+  return ITensor{N}(Dense{float(T)}(zeros(float(T),dim(inds))),inds)
 end
 ITensor(::Type{T},inds::Index...) where {T<:Number} = ITensor(T,IndexSet(inds...))
 
 function ITensor(::UndefInitializer,
-                 inds::IndexSet)
-  return ITensor(Dense{Float64}(Vector{Float64}(undef,dim(inds))),inds)
+                 inds::IndexSet{N}) where {N}
+  return ITensor{N}(Dense{Float64}(Vector{Float64}(undef,dim(inds))),inds)
 end
 ITensor(x::UndefInitializer,inds::Index...) = ITensor(x,IndexSet(inds...))
 
-function ITensor(x::S,inds::IndexSet) where {S<:Number}
-  return ITensor(Dense{float(S)}(fill(float(x),dim(inds))),inds)
+function ITensor(x::S,inds::IndexSet{N}) where {S<:Number,N}
+  return ITensor{N}(Dense{float(S)}(fill(float(x),dim(inds))),inds)
 end
 ITensor(x::S,inds::Index...) where {S<:Number} = ITensor(x,IndexSet(inds...))
 
 #TODO: check that the size of the Array matches the Index dimensions
-function ITensor(A::Array{S},inds::IndexSet) where {S<:Number}
-  length(A) ≠ dim(inds) && throw(DimensionMismatch("In ITensor(Array,IndexSet), length of Array ($(length(A))) must match total dimension of IndexSet ($(dim(inds)))"))
-  return ITensor(Dense{float(S)}(float(vec(A))),inds)
+function ITensor(A::Array{S,N},inds::IndexSet{N}) where {S<:Number,N}
+  length(A) ≠ dim(inds) && throw(DimensionMismatch("In ITensor(Array,IndexSet), length of Array ($(length(A))) must match total dimension of IndexSet ($(dim(inds)))"))
+  return ITensor{N}(Dense{float(S)}(float(vec(A))),inds)
 end
 ITensor(A::Array{S},inds::Index...) where {S<:Number} = ITensor(A,IndexSet(inds...))
@@ -93,8 +93,8 @@ only along the diagonal. Defaults to having `zero(T)` along
 the diagonal. The storage will have Diag type.
 """
 function diagITensor(::Type{T},
-                     is::IndexSet) where {T<:Number}
-  return ITensor(Diag{Vector{T}}(zeros(T,minDim(is))),is)
+                     is::IndexSet{N}) where {T<:Number,N}
+  return ITensor{N}(Diag{Vector{T}}(zeros(T,minDim(is))),is)
 end
 
 """
@@ -272,7 +272,7 @@ function matrix(T::ITensor{N},i1::Index,i2::Index) where {N}
   return array(T,i1,i2)
 end
 
-Base.getindex(T::ITensor{N},vals::Vararg{Int,N}) where {N} = tensor(T)[vals...]
+Base.getindex(T::ITensor{N},vals::Vararg{Int,N}) where {N} = tensor(T)[vals...]::Number
 
 function Base.getindex(T::ITensor{N},
                        ivs::Vararg{IndexVal,N}) where {N}
diff --git a/src/iterativesolvers.jl b/src/iterativesolvers.jl
index 7bed904fca..0e293aee51 100644
--- a/src/iterativesolvers.jl
+++ b/src/iterativesolvers.jl
@@ -43,18 +43,18 @@ function orthogonalize!(q::ITensor,V,ni)
 end
 
 function expand_krylov_space(M::Matrix,V,AV,ni)
-  newM = fill(0.0,(ni+1,ni+1))
-  newM[1:ni,1:ni] = M
-  for k=1:ni+1
-    newM[k,ni+1] = dot(V[k],AV[ni+1])
-    newM[ni+1,k] = conj(newM[k,ni+1])
-  end
-  return newM
+  newM = fill(0.0,(ni+1,ni+1))
+  newM[1:ni,1:ni] = M
+  for k=1:ni+1
+    newM[k,ni+1] = dot(V[k],AV[ni+1])
+    newM[ni+1,k] = conj(newM[k,ni+1])
+  end
+  return newM
 end
 
 function davidson(A,
-                  phi0::ITensor;
-                  kwargs...)::Tuple{Float64,ITensor}
+                  phi0::ITensorT;
+                  kwargs...) where {ITensorT<:ITensor}
 
   phi = copy(phi0)
 
@@ -83,11 +83,11 @@ function davidson(A,
   Aphi = A(phi)
 end
 
-  V = ITensor[copy(phi)]
-  AV = ITensor[Aphi]
+  V = ITensorT[copy(phi)]
+  AV = ITensorT[Aphi]
 
   last_lambda = NaN
-  lambda = dot(V[1],AV[1])
+  lambda::Float64 = dot(V[1],AV[1])
 
   q = AV[1] - lambda*V[1];
 
   M = fill(lambda,(1,1))
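
Whether hints like the `::Number` assertion on `getindex` and the concrete `lambda::Float64` actually remove instabilities can be checked directly. A test sketch (the index names and the `randomITensor` call are illustrative):

    using ITensors, Test

    i = Index(2, "i")
    j = Index(2, "j")
    T = randomITensor(i, j)

    # @inferred throws if the return type is not inferred concretely;
    # @code_warntype highlights any remaining non-concrete results.
    @inferred getindex(T, i(1), j(2))
    @code_warntype getindex(T, 1, 2)
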
From 401dc0cee50df12b6fc5b13fc3d3d228aa8d55a3 Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Wed, 16 Oct 2019 13:39:22 -0400
Subject: [PATCH 26/27] Fix TRG/CTMRG unit tests

---
 src/itensor.jl             | 3 +--
 test/2d_classical_ising.jl | 8 ++++----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/src/itensor.jl b/src/itensor.jl
index 6c4427e47c..f142f0f2fd 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -74,8 +74,7 @@ function ITensor(x::S,inds::IndexSet{N}) where {S<:Number,N}
 end
 ITensor(x::S,inds::Index...) where {S<:Number} = ITensor(x,IndexSet(inds...))
 
-#TODO: check that the size of the Array matches the Index dimensions
-function ITensor(A::Array{S,N},inds::IndexSet{N}) where {S<:Number,N}
+function ITensor(A::Array{S},inds::IndexSet{N}) where {S<:Number,N}
   length(A) ≠ dim(inds) && throw(DimensionMismatch("In ITensor(Array,IndexSet), length of Array ($(length(A))) must match total dimension of IndexSet ($(dim(inds)))"))
   return ITensor{N}(Dense{float(S)}(float(vec(A))),inds)
 end
diff --git a/test/2d_classical_ising.jl b/test/2d_classical_ising.jl
index 450a0f53b4..ef9c68750c 100644
--- a/test/2d_classical_ising.jl
+++ b/test/2d_classical_ising.jl
@@ -33,10 +33,10 @@ function ising_mpo(sh::Tuple{Index,Index},sv::Tuple{Index,Index},
     Q = [exp(β*J) exp(-β*J); exp(-β*J) exp(β*J)]
     D,U = eigen(Symmetric(Q))
     √Q = U*Diagonal(sqrt.(D))*U'
-    Xh1 = ITensor(vec(√Q),sh[1],sh[1]')
-    Xh2 = ITensor(vec(√Q),sh[2],sh[2]')
-    Xv1 = ITensor(vec(√Q),sv[1],sv[1]')
-    Xv2 = ITensor(vec(√Q),sv[2],sv[2]')
+    Xh1 = ITensor(√Q,sh[1],sh[1]')
+    Xh2 = ITensor(√Q,sh[2],sh[2]')
+    Xv1 = ITensor(√Q,sv[1],sv[1]')
+    Xv2 = ITensor(√Q,sv[2],sv[2]')
     T = noprime(T*Xh1*Xh2*Xv1*Xv2)
   else
     sig(s) = 1.0-2.0*(s-1)

From 84bf2e7b8712932fcea383c32192f6d9d98020ed Mon Sep 17 00:00:00 2001
From: Matthew Fishman
Date: Wed, 16 Oct 2019 16:17:46 -0400
Subject: [PATCH 27/27] Remove getindex(::Index,::Colon) test

---
 test/test_index.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_index.jl b/test/test_index.jl
index 0925340d3e..12605ffeec 100644
--- a/test/test_index.jl
+++ b/test/test_index.jl
@@ -64,7 +64,7 @@ import ITensors: In,Out,Neither
   @test plev(i(2)') == 1
   @test val(i(2)') == 2
   @test plev(prime(i(2),4)) == 4
-  @test i[:] == [i(1); i(2)]
+  #@test i[:] == [i(1); i(2)]
   @test sprint(show, i(2)) == sprint(show, i)*"=2"
   @test IndexVal() == IndexVal(Index(),1)
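
With `getindex(::Index, ::Colon)` gone, code that wants all values of an index can loop over `1:dim(i)` and build the `IndexVal`s itself; a short sketch of the replacement idiom:

    i = Index(2, "i")
    vals = [i(n) for n in 1:dim(i)]   # same result as the removed i[:]
    @assert vals == [i(1); i(2)]
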