From f10e2d38dfa9949077ee3bf7a73a87f9d8c6ecc8 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sun, 13 Nov 2022 15:50:32 -0600 Subject: [PATCH 01/90] Refactor: Breakout index fixing in qr() for empty left/right indices Try and reduce complexity prior to implementing qr with combiners. --- src/tensor_operations/matrix_decomposition.jl | 58 ++++++++++++------- test/decomp.jl | 37 ++++++++++++ 2 files changed, 73 insertions(+), 22 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 01c3dfafb9..57d19dcfc7 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -360,6 +360,35 @@ function noinds_error_message(decomp::String) treating the ITensor as a matrix from the primed to the unprimed indices." end +function add_trivial_index(A::ITensor,Ainds) + α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1) + vα = onehot(eltype(A), α => 1) + A *= vα + return A,vα,[α] +end + +function add_trivial_index(A::ITensor,Linds,Rinds) + if isempty(Linds) + A,vα,Linds=add_trivial_index(A,Rinds) + elseif isempty(Rinds) + A,vα,Rinds=add_trivial_index(A,Linds) + else + vα=nothing + end + return A,vα,Linds,Rinds +end + +function remove_trivial_index(Q::ITensor,R::ITensor,vα::ITensor) + if length(inds(Q))==2 #should have only dummy + qr,Link + Q*=dag(vα) + elseif length(inds(R))==2 #should have only dummy + qr,Link + R*=dag(vα) + else + @error "Should be impossible" + end + return Q,R +end + qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) # TODO: write this in terms of combiners and then @@ -368,22 +397,12 @@ function qr(A::ITensor, Linds...; kwargs...) tags::TagSet = get(kwargs, :tags, "Link,qr") Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) - - Lis_original = Lis - Ris_original = Ris - if isempty(Lis_original) - α = trivial_index(Ris) - vLα = onehot(eltype(A), α => 1) - A *= vLα - Lis = [α] - end - if isempty(Ris_original) - α = trivial_index(Lis) - vRα = onehot(eltype(A), α => 1) - A *= vRα - Ris = [α] - end - + lre = isempty(Lis) || isempty(Ris) + + # make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never + # be empty. A essentially becomes 1D after collection. + if (lre) A,vα,Lis,Ris=add_trivial_index(A,Lis,Ris) end + Lpos, Rpos = NDTensors.getperms(inds(A), Lis, Ris) QT, RT = qr(tensor(A), Lpos, Rpos; kwargs...) Q, R = itensor(QT), itensor(RT) @@ -392,12 +411,7 @@ function qr(A::ITensor, Linds...; kwargs...) settags!(R, tags, q) q = settags(q, tags) - if isempty(Lis_original) - Q *= dag(vLα) - end - if isempty(Ris_original) - R *= dag(vRα) - end + if (lre) Q,R = remove_trivial_index(Q,R,vα) end return Q, R, q end diff --git a/test/decomp.jl b/test/decomp.jl index c8ee14e25f..a53debf2f6 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -49,6 +49,43 @@ using ITensors, LinearAlgebra, Test ) end + @testset "QR dense on MPS tensor with all possible collections on Q,R" for ninds in [0,1,2,3] + l = Index(5, "l") + s = Index(2, "s") + r = Index(10, "r") + A = randomITensor(l, s, r) + Ainds=inds(A) + Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. 
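+    # For ninds=0, Q carries only the new qr,Link index, while R gets all of A's indices plus that link.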
+ @test length(inds(R)) == 3-ninds+1 + @test A ≈ Q * R atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + end + + @testset "QR dense on MP0 tensor with all possible collections on Q,R" for ninds in [0,1,2,3,4] + l = Index(5, "l") + s = Index(2, "s") + r = Index(10, "r") + A = randomITensor(l, s, s', r) + Ainds=inds(A) + Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. + @test length(inds(R)) == 4-ninds+1 + @test A ≈ Q * R atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + end + + #= @testset "QR block sparse with (l,s) collection on Q" begin + l = Index(QN("Sz", 0) => 5; tags="l") + s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") + r = Index(QN("Sz", 0) => 5; tags="r") + A = randomITensor(l, s, r) + @show A + Q, R, q= qr(A,l,s) + @test A ≈ Q * R atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + end =# + @testset "factorize with QR" begin l = Index(5, "l") s = Index(2, "s") From cd88e807b055942ffa852b484f89ad144147e54b Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Mon, 14 Nov 2022 09:57:57 -0600 Subject: [PATCH 02/90] qr decomp, switch to using combiners for gathering indices. --- src/tensor_operations/matrix_decomposition.jl | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 57d19dcfc7..384280b380 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -403,16 +403,32 @@ function qr(A::ITensor, Linds...; kwargs...) # be empty. A essentially becomes 1D after collection. if (lre) A,vα,Lis,Ris=add_trivial_index(A,Lis,Ris) end - Lpos, Rpos = NDTensors.getperms(inds(A), Lis, Ris) - QT, RT = qr(tensor(A), Lpos, Rpos; kwargs...) - Q, R = itensor(QT), itensor(RT) + CL = combiner(Lis...) + CR = combiner(Ris...) + AC = A * CR * CL + + cL = combinedind(CL) + cR = combinedind(CR) + + if inds(AC) != IndexSet(cL, cR) + AC = permute(AC, cL, cR) + end + + QT, RT = qr(tensor(AC); kwargs...) + QC, RC = itensor(QT), itensor(RT) + Q = QC * dag(CL) + R = RC * dag(CR) + + # Conditionally remove dummy index. + if (lre) Q,R = remove_trivial_index(Q,R,vα) end + + q = commonind(Q, R) settags!(Q, tags, q) settags!(R, tags, q) q = settags(q, tags) - if (lre) Q,R = remove_trivial_index(Q,R,vα) end - + return Q, R, q end From 9de2ff6b705f4b95b23ce81ad60f19357264ace0 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 16 Nov 2022 09:27:52 -0600 Subject: [PATCH 03/90] Handle empty index collections on Q or R. Also demonstrate index collection that fails for block sparse QR. --- src/tensor_operations/matrix_decomposition.jl | 191 ++++++++++++++---- test/decomp.jl | 43 +++- 2 files changed, 190 insertions(+), 44 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 384280b380..e66508cbe8 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -360,6 +360,124 @@ function noinds_error_message(decomp::String) treating the ITensor as a matrix from the primed to the unprimed indices." end + +#QR a block sparse Rank 2 tensor. +function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) 
where{ElT, StoreT,IndsT} + Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) + Rs = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) + + for (jj,b) in enumerate(eachnzblock(T)) + blockT = blockview(T,b) + QRb = qr(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387 + + if(isnothing(QRb)) + return nothing + end + + Q, R = QRb + Qs[jj] = Q + Rs[jj] = R + + end + + # getting total number of blocks + nnzblocksT = nnzblocks(T) + nzblocksT = nzblocks(T) + + nb1_lt_nb2 = ( + nblocks(T)[1] < nblocks(T)[2] || + (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) + ) + + # setting the right index of the Q isometry, this should be + # the smaller index of the two indices of of T + qindl = ind(T,1) + if nb1_lt_nb2 + qindr = sim(ind(T, 1)) + else + qindr = sim(ind(T, 2)) + end + + # can qindr have more blocks than T? + if nblocks(qindr) > nnzblocksT + resize!(qindr, nnzblocksT) + end + + for n in 1:nnzblocksT + q_dim_red = minimum(dims(Rs[n])) + NDTensors.setblockdim!(qindr, q_dim_red, n) + end + + # correcting the direction of the arrow + # since qind2r is basically a copy of qind1r + # if one have to be corrected the other one + # should also be corrected + if(dir(qindr) != dir(qindl)) + qindr = dag(qindr) + end + + indsQ = setindex(inds(T), dag(qindr), 2) + + # R left index + rindl = qindr + rindr = ind(T,2) + + # if(dir(rindl) != dir(rindr)) + # rindl = dag(rindl) + # end + + indsR = setindex(inds(T), qindr, 1) + nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) + nzblocksR = Vector{Block{2}}(undef, nnzblocksT) + + for n in 1:nnzblocksT + blockT = nzblocksT[n] + + blockQ = (blockT[1], UInt(n)) + nzblocksQ[n] = blockQ + + blockR = (blockT[2], UInt(n)) + nzblocksR[n] = blockR + end + + Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) + R = BlockSparseTensor(ElT, undef, nzblocksR, indsR) + + + for n in 1:nnzblocksT + Qb, Rb = Qs[n], Rs[n] + blockQ = nzblocksQ[n] + blockR = nzblocksR[n] + + if VERSION < v"1.5" + # In v1.3 and v1.4 of Julia, Ub has + # a very complicated view wrapper that + # can't be handled efficiently + Qb = copy(Qb) + Rb = copy(Vb) + end + + blockview(Q, blockQ) .= Qb + blockview(R, blockR) .= Rb + end + + # correcting the fluxes of the + # two tensors, such that + # Q has 0 flux for all blocks + # and R has the total flux of the system + for b in nzblocks(Q) + i1 = inds(Q)[1] + i2 = inds(Q)[2] + r1 = inds(R)[1] + newqn = -dir(i2) * flux(i1 => Block(b[1])) + ITensors.setblockqn!(i2, newqn, b[2]) + ITensors.setblockqn!(r1, newqn, b[2]) + end + + return Q, R +end + + function add_trivial_index(A::ITensor,Ainds) α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1) vα = onehot(eltype(A), α => 1) @@ -368,67 +486,66 @@ function add_trivial_index(A::ITensor,Ainds) end function add_trivial_index(A::ITensor,Linds,Rinds) + vαl,vαr=nothing,nothing if isempty(Linds) - A,vα,Linds=add_trivial_index(A,Rinds) - elseif isempty(Rinds) - A,vα,Rinds=add_trivial_index(A,Linds) - else - vα=nothing + A,vαl,Linds=add_trivial_index(A,Rinds) + end + if isempty(Rinds) + A,vαr,Rinds=add_trivial_index(A,Linds) end - return A,vα,Linds,Rinds + return A,vαl,vαr,Linds,Rinds end -function remove_trivial_index(Q::ITensor,R::ITensor,vα::ITensor) - if length(inds(Q))==2 #should have only dummy + qr,Link - Q*=dag(vα) - elseif length(inds(R))==2 #should have only dummy + qr,Link - R*=dag(vα) - else - @error "Should be impossible" +function remove_trivial_index(Q::ITensor,R::ITensor,vαl,vαr) + if !isnothing(vαl) #should have only dummy + qr,Link + Q*=dag(vαl) + end + 
if !isnothing(vαr) #should have only dummy + qr,Link + R*=dag(vαr) end return Q,R end qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) -# TODO: write this in terms of combiners and then -# call qr on the order-2 tensors directly function qr(A::ITensor, Linds...; kwargs...) - tags::TagSet = get(kwargs, :tags, "Link,qr") + qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) lre = isempty(Lis) || isempty(Ris) - # make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never # be empty. A essentially becomes 1D after collection. - if (lre) A,vα,Lis,Ris=add_trivial_index(A,Lis,Ris) end + if (lre) A,vαl,vαr,Lis,Ris=add_trivial_index(A,Lis,Ris) end - CL = combiner(Lis...) - CR = combiner(Ris...) + # + # Use combiners to render A down to a rank 2 tensor ready matrix QR routine. + # + CL,CR = combiner(Lis...),combiner(Ris...) + cL,cR = combinedind(CL),combinedind(CR) AC = A * CR * CL - - cL = combinedind(CL) - cR = combinedind(CR) - + # + # Make sure we don't accidentally pass the transpose into the matrix qr routine. + # if inds(AC) != IndexSet(cL, cR) AC = permute(AC, cL, cR) end - + # qr the matrix. QT, RT = qr(tensor(AC); kwargs...) - QC, RC = itensor(QT), itensor(RT) - Q = QC * dag(CL) - R = RC * dag(CR) - - # Conditionally remove dummy index. - if (lre) Q,R = remove_trivial_index(Q,R,vα) end - - + # + # Undo the combine oepration, to recover all tensor indices. + # + Q, R = itensor(QT) * dag(CL), itensor(RT)* dag(CR) + + # Conditionally remove dummy indices. + if (lre) Q,R = remove_trivial_index(Q,R,vαl,vαr) end + # + # fix up the tag name for the index between Q and R. + # q = commonind(Q, R) - settags!(Q, tags, q) - settags!(R, tags, q) - q = settags(q, tags) + settags!(Q, qtag, q) + settags!(R, qtag, q) + q = settags(q, qtag) - return Q, R, q end diff --git a/test/decomp.jl b/test/decomp.jl index a53debf2f6..fd9ab46ed4 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -75,16 +75,45 @@ using ITensors, LinearAlgebra, Test @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end - #= @testset "QR block sparse with (l,s) collection on Q" begin - l = Index(QN("Sz", 0) => 5; tags="l") + @testset "QR block sparse with (l) collection on Q" begin + l = Index(QN("Sz", 0) => 3; tags="l") s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") - r = Index(QN("Sz", 0) => 5; tags="r") - A = randomITensor(l, s, r) - @show A + r = Index(QN("Sz", 0) => 3; tags="r") + A = randomITensor(l, s,dag(s'), r) + @test flux(A)==QN("Sz", 0) + Q, R, q= qr(A,l) + @test A ≈ Q * R atol = 1e-13 + # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. + # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. + # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 + @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + end + @testset "QR block sparse with (l,s) collection on Q" begin + l = Index(QN("Sz", 0) => 3; tags="l") + s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") + r = Index(QN("Sz", 0) => 3; tags="r") + A = randomITensor(l, s,dag(s'), r) + @test flux(A)==QN("Sz", 0) Q, R, q= qr(A,l,s) @test A ≈ Q * R atol = 1e-13 - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - end =# + # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. + # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
+    # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
+    @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
+  end
+  @testset "QR block sparse with (l,s,r) collection on Q" begin
+    l = Index(QN("Sz", 0) => 3; tags="l")
+    s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s")
+    r = Index(QN("Sz", 0) => 3; tags="r")
+    A = randomITensor(l, s,dag(s'), r)
+    @test flux(A)==QN("Sz", 0)
+    Q, R, q= qr(A,l,s,r)
+    @test A ≈ Q * R atol = 1e-13
+    # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
+    # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
+    # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
+    @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
+  end

   @testset "factorize with QR" begin
     l = Index(5, "l")
     s = Index(2, "s")

From dfb66e0cd1e86bbaae0f51bff9c5445881f17d8f Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Wed, 16 Nov 2022 10:17:35 -0600
Subject: [PATCH 04/90] Fix known block sparse failing use case ...With help
 from Niklas Tausendpfund

---
 src/tensor_operations/matrix_decomposition.jl | 27 ++++++------
 test/decomp.jl                                | 38 ++++++++-----------
 2 files changed, 26 insertions(+), 39 deletions(-)

diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index e66508cbe8..a073f20649 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -363,8 +363,13 @@ end

 #QR a block sparse Rank 2 tensor.
 function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) 
where{ElT, Stor end + function add_trivial_index(A::ITensor,Ainds) α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1) vα = onehot(eltype(A), α => 1) diff --git a/test/decomp.jl b/test/decomp.jl index fd9ab46ed4..af4fd6cb98 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -75,39 +75,31 @@ using ITensors, LinearAlgebra, Test @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end - @testset "QR block sparse with (l) collection on Q" begin - l = Index(QN("Sz", 0) => 3; tags="l") + @testset "QR block sparse on MPS tensor with all possible collections on Q,R" for ninds in [0,1,2,3] + l = dag(Index(QN("Sz", 0) => 3; tags="l")) s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") - A = randomITensor(l, s,dag(s'), r) - @test flux(A)==QN("Sz", 0) - Q, R, q= qr(A,l) - @test A ≈ Q * R atol = 1e-13 - # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. - # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. - # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 - @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - end - @testset "QR block sparse with (l,s) collection on Q" begin - l = Index(QN("Sz", 0) => 3; tags="l") - s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") - r = Index(QN("Sz", 0) => 3; tags="r") - A = randomITensor(l, s,dag(s'), r) - @test flux(A)==QN("Sz", 0) - Q, R, q= qr(A,l,s) + A = randomITensor(l, s, r) + Ainds=inds(A) + Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. + @test length(inds(R)) == 3-ninds+1 @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - @testset "QR block sparse with (l,s,r) collection on Q" begin - l = Index(QN("Sz", 0) => 3; tags="l") + + @testset "QR block sparse on MPO tensor with all possible collections on Q,R" for ninds in [0,1,2,3,4] + l = dag(Index(QN("Sz", 0) => 3; tags="l")) s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") - A = randomITensor(l, s,dag(s'), r) - @test flux(A)==QN("Sz", 0) - Q, R, q= qr(A,l,s,r) + A = randomITensor(l, s, dag(s'), r) + Ainds=inds(A) + Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. + @test length(inds(R)) == 4-ninds+1 @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. From 9752128d59a320c217cb3448f390a204af2622f8 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 16 Nov 2022 12:22:02 -0600 Subject: [PATCH 05/90] Add attribution for Niklas --- src/tensor_operations/matrix_decomposition.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index a073f20649..30f191c00b 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -362,6 +362,9 @@ end #QR a block sparse Rank 2 tensor. 
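+# Each non-zero block can be factored independently: for a flux-definite block
+# sparse tensor, every row block of T appears in at most one non-zero block, so
+# the per-block Q factors assemble into a globally orthogonal Q.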
+# +# This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb +# function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) where{ElT, StoreT,IndsT} # getting total number of blocks From 253b5ec13c7b936b7f6804ca4b83bd2ebe7173d7 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 16 Nov 2022 12:49:49 -0600 Subject: [PATCH 06/90] Add qr test on autoMPO generated block sparse tensors Heisenberg Hamiltonian --- test/decomp.jl | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/test/decomp.jl b/test/decomp.jl index af4fd6cb98..af18a82445 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -106,7 +106,29 @@ using ITensors, LinearAlgebra, Test # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - + + @testset "QR Heisenberg MPO tensors" begin + N = 4 + sites = siteinds("S=1", N; conserve_qns=true) + ampo = OpSum() + for j in 1:(N - 1) + ampo .+= 0.5, "S+", j, "S-", j + 1 + ampo .+= 0.5, "S-", j, "S+", j + 1 + ampo .+= "Sz", j, "Sz", j + 1 + end + H = MPO(ampo, sites; splitblocks=false) + for n in 1:N-1 + W=H[n] + ilr=filterinds(W,tags="l=$n")[1] + ilq=noncommoninds(W,ilr) + Q,R,q=qr(W,ilq) + @test W ≈ Q * R atol = 1e-13 + # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. + # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. + # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 + @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + end + end @testset "factorize with QR" begin l = Index(5, "l") s = Index(2, "s") From c43814fd8b313c43f35062c615722eea8fe2f0ff Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 16 Nov 2022 16:17:07 -0600 Subject: [PATCH 07/90] Move block sparse QR into NDTensors layer where it belongs. --- NDTensors/src/blocksparse/linearalgebra.jl | 103 ++++++++++++++ src/tensor_operations/matrix_decomposition.jl | 127 +++--------------- 2 files changed, 118 insertions(+), 112 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 7814239716..e07b9f1adb 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -296,6 +296,109 @@ function LinearAlgebra.eigen( return D, V, Spectrum(d, truncerr) end +#QR a block sparse Rank 2 tensor. +# +# This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb +# +function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) where{ElT, StoreT,IndsT} + + # getting total number of blocks + nnzblocksT = nnzblocks(T) + nzblocksT = nzblocks(T) + + Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) + Rs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) + + for (jj,b) in enumerate(eachnzblock(T)) + blockT = blockview(T,b) + QRb = qr(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387 + + if(isnothing(QRb)) + return nothing + end + + Q, R = QRb + Qs[jj] = Q + Rs[jj] = R + + end + + + nb1_lt_nb2 = ( + nblocks(T)[1] < nblocks(T)[2] || + (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) + ) + + # setting the right index of the Q isometry, this should be + # the smaller index of the two indices of of T + qindl = ind(T,1) + if nb1_lt_nb2 + qindr = sim(ind(T, 1)) + else + qindr = sim(ind(T, 2)) + end + + # can qindr have more blocks than T? 
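+  # Yes it can: qindr is a copy of one of T's indices, which may carry blocks
+  # holding no data. The new link index only needs one block per non-zero block
+  # of T, so trim the extras before setting the block dimensions below.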
+ if nblocks(qindr) > nnzblocksT + resize!(qindr, nnzblocksT) + end + + for n in 1:nnzblocksT + q_dim_red = minimum(dims(Rs[n])) + NDTensors.setblockdim!(qindr, q_dim_red, n) + end + + # correcting the direction of the arrow + # since qind2r is basically a copy of qind1r + # if one have to be corrected the other one + # should also be corrected + if(dir(qindr) != dir(qindl)) + qindr = dag(qindr) + end + + indsQ = setindex(inds(T), dag(qindr), 2) + indsR = setindex(inds(T), qindr, 1) + + nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) + nzblocksR = Vector{Block{2}}(undef, nnzblocksT) + + for n in 1:nnzblocksT + blockT = nzblocksT[n] + + blockQ = (blockT[1], UInt(n)) + nzblocksQ[n] = blockQ + + blockR = (UInt(n), blockT[2]) + nzblocksR[n] = blockR + end + + Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) + R = BlockSparseTensor(ElT, undef, nzblocksR, indsR) + + + for n in 1:nnzblocksT + Qb, Rb = Qs[n], Rs[n] + blockQ = nzblocksQ[n] + blockR = nzblocksR[n] + + if VERSION < v"1.5" + # In v1.3 and v1.4 of Julia, Ub has + # a very complicated view wrapper that + # can't be handled efficiently + Qb = copy(Qb) + Rb = copy(Vb) + end + + blockview(Q, blockQ) .= Qb + blockview(R, blockR) .= Rb + end + + + + return Q, R +end + + function exp( T::Union{BlockSparseMatrix{ElT},Hermitian{ElT,<:BlockSparseMatrix{ElT}}} ) where {ElT<:Union{Real,Complex}} diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 30f191c00b..86fdf2eae6 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -361,118 +361,6 @@ function noinds_error_message(decomp::String) end -#QR a block sparse Rank 2 tensor. -# -# This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb -# -function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) where{ElT, StoreT,IndsT} - - # getting total number of blocks - nnzblocksT = nnzblocks(T) - nzblocksT = nzblocks(T) - - Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) - Rs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) - - for (jj,b) in enumerate(eachnzblock(T)) - blockT = blockview(T,b) - QRb = qr(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387 - - if(isnothing(QRb)) - return nothing - end - - Q, R = QRb - Qs[jj] = Q - Rs[jj] = R - - end - - - nb1_lt_nb2 = ( - nblocks(T)[1] < nblocks(T)[2] || - (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) - ) - - # setting the right index of the Q isometry, this should be - # the smaller index of the two indices of of T - qindl = ind(T,1) - if nb1_lt_nb2 - qindr = sim(ind(T, 1)) - else - qindr = sim(ind(T, 2)) - end - - # can qindr have more blocks than T? 
- if nblocks(qindr) > nnzblocksT - resize!(qindr, nnzblocksT) - end - - for n in 1:nnzblocksT - q_dim_red = minimum(dims(Rs[n])) - NDTensors.setblockdim!(qindr, q_dim_red, n) - end - - # correcting the direction of the arrow - # since qind2r is basically a copy of qind1r - # if one have to be corrected the other one - # should also be corrected - if(dir(qindr) != dir(qindl)) - qindr = dag(qindr) - end - - indsQ = setindex(inds(T), dag(qindr), 2) - indsR = setindex(inds(T), qindr, 1) - - nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) - nzblocksR = Vector{Block{2}}(undef, nnzblocksT) - - for n in 1:nnzblocksT - blockT = nzblocksT[n] - - blockQ = (blockT[1], UInt(n)) - nzblocksQ[n] = blockQ - - blockR = (UInt(n), blockT[2]) - nzblocksR[n] = blockR - end - - Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) - R = BlockSparseTensor(ElT, undef, nzblocksR, indsR) - - - for n in 1:nnzblocksT - Qb, Rb = Qs[n], Rs[n] - blockQ = nzblocksQ[n] - blockR = nzblocksR[n] - - if VERSION < v"1.5" - # In v1.3 and v1.4 of Julia, Ub has - # a very complicated view wrapper that - # can't be handled efficiently - Qb = copy(Qb) - Rb = copy(Vb) - end - - blockview(Q, blockQ) .= Qb - blockview(R, blockR) .= Rb - end - - # correcting the fluxes of the - # two tensors, such that - # Q has 0 flux for all blocks - # and R has the total flux of the system - for b in nzblocks(Q) - i1 = inds(Q)[1] - i2 = inds(Q)[2] - r1 = inds(R)[1] - newqn = -dir(i2) * flux(i1 => Block(b[1])) - ITensors.setblockqn!(i2, newqn, b[2]) - ITensors.setblockqn!(r1, newqn, b[2]) - end - - return Q, R -end @@ -529,6 +417,21 @@ function qr(A::ITensor, Linds...; kwargs...) end # qr the matrix. QT, RT = qr(tensor(AC); kwargs...) + + # We need a use case that fails without this code. + # correcting the fluxes of the two tensors, such that Q has 0 flux for all blocks + # and R has the total flux of the system + # if hasqns(AC) + # for b in nzblocks(QT) + # i1 = inds(QT)[1] + # i2 = inds(QT)[2] + # r1 = inds(RT)[1] + # newqn = -dir(i2) * flux(i1 => Block(b[1])) + # ITensors.setblockqn!(i2, newqn, b[2]) + # ITensors.setblockqn!(r1, newqn, b[2]) + # end + # end + # # Undo the combine oepration, to recover all tensor indices. # From 24c2ff7a46ee51427f0f19745f368a68c9a3b372 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 16 Nov 2022 16:50:26 -0600 Subject: [PATCH 08/90] Run the formatter --- NDTensors/src/blocksparse/linearalgebra.jl | 124 +++++++++--------- src/tensor_operations/matrix_decomposition.jl | 44 +++---- test/decomp.jl | 72 +++++----- 3 files changed, 121 insertions(+), 119 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index e07b9f1adb..511e694904 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -300,105 +300,99 @@ end # # This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) where{ElT, StoreT,IndsT} - +function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) where {ElT,StoreT,IndsT} + # getting total number of blocks nnzblocksT = nnzblocks(T) - nzblocksT = nzblocks(T) - + nzblocksT = nzblocks(T) + Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) Rs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) - - for (jj,b) in enumerate(eachnzblock(T)) - blockT = blockview(T,b) - QRb = qr(blockT; kwargs...) 
#call dense qr at src/linearalgebra.jl 387 - - if(isnothing(QRb)) - return nothing - end - - Q, R = QRb - Qs[jj] = Q - Rs[jj] = R - - end - - + + for (jj, b) in enumerate(eachnzblock(T)) + blockT = blockview(T, b) + QRb = qr(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387 + + if (isnothing(QRb)) + return nothing + end + + Q, R = QRb + Qs[jj] = Q + Rs[jj] = R + end + nb1_lt_nb2 = ( - nblocks(T)[1] < nblocks(T)[2] || - (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) - ) + nblocks(T)[1] < nblocks(T)[2] || + (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) + ) # setting the right index of the Q isometry, this should be # the smaller index of the two indices of of T - qindl = ind(T,1) + qindl = ind(T, 1) if nb1_lt_nb2 - qindr = sim(ind(T, 1)) + qindr = sim(ind(T, 1)) else - qindr = sim(ind(T, 2)) + qindr = sim(ind(T, 2)) end - + # can qindr have more blocks than T? if nblocks(qindr) > nnzblocksT - resize!(qindr, nnzblocksT) + resize!(qindr, nnzblocksT) end - + for n in 1:nnzblocksT - q_dim_red = minimum(dims(Rs[n])) - NDTensors.setblockdim!(qindr, q_dim_red, n) + q_dim_red = minimum(dims(Rs[n])) + NDTensors.setblockdim!(qindr, q_dim_red, n) end - + # correcting the direction of the arrow # since qind2r is basically a copy of qind1r # if one have to be corrected the other one # should also be corrected - if(dir(qindr) != dir(qindl)) - qindr = dag(qindr) + if (dir(qindr) != dir(qindl)) + qindr = dag(qindr) end - + indsQ = setindex(inds(T), dag(qindr), 2) indsR = setindex(inds(T), qindr, 1) - + nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) nzblocksR = Vector{Block{2}}(undef, nnzblocksT) - + for n in 1:nnzblocksT - blockT = nzblocksT[n] - - blockQ = (blockT[1], UInt(n)) - nzblocksQ[n] = blockQ - - blockR = (UInt(n), blockT[2]) - nzblocksR[n] = blockR + blockT = nzblocksT[n] + + blockQ = (blockT[1], UInt(n)) + nzblocksQ[n] = blockQ + + blockR = (UInt(n), blockT[2]) + nzblocksR[n] = blockR end - + Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) R = BlockSparseTensor(ElT, undef, nzblocksR, indsR) - - + for n in 1:nnzblocksT - Qb, Rb = Qs[n], Rs[n] - blockQ = nzblocksQ[n] - blockR = nzblocksR[n] - - if VERSION < v"1.5" - # In v1.3 and v1.4 of Julia, Ub has - # a very complicated view wrapper that - # can't be handled efficiently - Qb = copy(Qb) - Rb = copy(Vb) - end - - blockview(Q, blockQ) .= Qb - blockview(R, blockR) .= Rb + Qb, Rb = Qs[n], Rs[n] + blockQ = nzblocksQ[n] + blockR = nzblocksR[n] + + if VERSION < v"1.5" + # In v1.3 and v1.4 of Julia, Ub has + # a very complicated view wrapper that + # can't be handled efficiently + Qb = copy(Qb) + Rb = copy(Vb) + end + + blockview(Q, blockQ) .= Qb + blockview(R, blockR) .= Rb end - - - + return Q, R end - function exp( T::Union{BlockSparseMatrix{ElT},Hermitian{ElT,<:BlockSparseMatrix{ElT}}} ) where {ElT<:Union{Real,Complex}} diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 86fdf2eae6..4f1d33cce1 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -360,36 +360,32 @@ function noinds_error_message(decomp::String) treating the ITensor as a matrix from the primed to the unprimed indices." 
end - - - - -function add_trivial_index(A::ITensor,Ainds) +function add_trivial_index(A::ITensor, Ainds) α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1) vα = onehot(eltype(A), α => 1) A *= vα - return A,vα,[α] + return A, vα, [α] end -function add_trivial_index(A::ITensor,Linds,Rinds) - vαl,vαr=nothing,nothing +function add_trivial_index(A::ITensor, Linds, Rinds) + vαl, vαr = nothing, nothing if isempty(Linds) - A,vαl,Linds=add_trivial_index(A,Rinds) + A, vαl, Linds = add_trivial_index(A, Rinds) end if isempty(Rinds) - A,vαr,Rinds=add_trivial_index(A,Linds) + A, vαr, Rinds = add_trivial_index(A, Linds) end - return A,vαl,vαr,Linds,Rinds + return A, vαl, vαr, Linds, Rinds end -function remove_trivial_index(Q::ITensor,R::ITensor,vαl,vαr) +function remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) if !isnothing(vαl) #should have only dummy + qr,Link - Q*=dag(vαl) + Q *= dag(vαl) end if !isnothing(vαr) #should have only dummy + qr,Link - R*=dag(vαr) + R *= dag(vαr) end - return Q,R + return Q, R end qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) @@ -401,13 +397,15 @@ function qr(A::ITensor, Linds...; kwargs...) lre = isempty(Lis) || isempty(Ris) # make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never # be empty. A essentially becomes 1D after collection. - if (lre) A,vαl,vαr,Lis,Ris=add_trivial_index(A,Lis,Ris) end - + if (lre) + A, vαl, vαr, Lis, Ris = add_trivial_index(A, Lis, Ris) + end + # # Use combiners to render A down to a rank 2 tensor ready matrix QR routine. # - CL,CR = combiner(Lis...),combiner(Ris...) - cL,cR = combinedind(CL),combinedind(CR) + CL, CR = combiner(Lis...), combiner(Ris...) + cL, cR = combinedind(CL), combinedind(CR) AC = A * CR * CL # # Make sure we don't accidentally pass the transpose into the matrix qr routine. @@ -435,10 +433,12 @@ function qr(A::ITensor, Linds...; kwargs...) # # Undo the combine oepration, to recover all tensor indices. # - Q, R = itensor(QT) * dag(CL), itensor(RT)* dag(CR) - + Q, R = itensor(QT) * dag(CL), itensor(RT) * dag(CR) + # Conditionally remove dummy indices. - if (lre) Q,R = remove_trivial_index(Q,R,vαl,vαr) end + if (lre) + Q, R = remove_trivial_index(Q, R, vαl, vαr) + end # # fix up the tag name for the index between Q and R. # diff --git a/test/decomp.jl b/test/decomp.jl index af18a82445..68e51165ac 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -49,64 +49,72 @@ using ITensors, LinearAlgebra, Test ) end - @testset "QR dense on MPS tensor with all possible collections on Q,R" for ninds in [0,1,2,3] + @testset "QR dense on MPS tensor with all possible collections on Q,R" for ninds in + [0, 1, 2, 3] l = Index(5, "l") s = Index(2, "s") r = Index(10, "r") A = randomITensor(l, s, r) - Ainds=inds(A) - Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 3-ninds+1 + Ainds = inds(A) + Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. 
+ @test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end - @testset "QR dense on MP0 tensor with all possible collections on Q,R" for ninds in [0,1,2,3,4] + @testset "QR dense on MP0 tensor with all possible collections on Q,R" for ninds in + [0, 1, 2, 3, 4] l = Index(5, "l") s = Index(2, "s") r = Index(10, "r") A = randomITensor(l, s, s', r) - Ainds=inds(A) - Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 4-ninds+1 + Ainds = inds(A) + Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(R)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end - - @testset "QR block sparse on MPS tensor with all possible collections on Q,R" for ninds in [0,1,2,3] + + @testset "QR block sparse on MPS tensor with all possible collections on Q,R" for ninds in + [ + 0, 1, 2, 3 + ] l = dag(Index(QN("Sz", 0) => 3; tags="l")) - s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") + s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") A = randomITensor(l, s, r) - Ainds=inds(A) - Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 3-ninds+1 + Ainds = inds(A) + Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 - @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - @testset "QR block sparse on MPO tensor with all possible collections on Q,R" for ninds in [0,1,2,3,4] + @testset "QR block sparse on MPO tensor with all possible collections on Q,R" for ninds in + [ + 0, 1, 2, 3, 4 + ] l = dag(Index(QN("Sz", 0) => 3; tags="l")) - s = Index(QN("Sz",-1) => 1, QN("Sz",1) => 1; tags="s") + s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") A = randomITensor(l, s, dag(s'), r) - Ainds=inds(A) - Q, R, q= qr(A,Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds+1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 4-ninds+1 + Ainds = inds(A) + Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(R)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
# @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 - @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - + @testset "QR Heisenberg MPO tensors" begin N = 4 sites = siteinds("S=1", N; conserve_qns=true) @@ -117,16 +125,16 @@ using ITensors, LinearAlgebra, Test ampo .+= "Sz", j, "Sz", j + 1 end H = MPO(ampo, sites; splitblocks=false) - for n in 1:N-1 - W=H[n] - ilr=filterinds(W,tags="l=$n")[1] - ilq=noncommoninds(W,ilr) - Q,R,q=qr(W,ilq) + for n in 1:(N - 1) + W = H[n] + ilr = filterinds(W; tags="l=$n")[1] + ilq = noncommoninds(W, ilr) + Q, R, q = qr(W, ilq) @test W ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 - @test norm(dense(Q*dag(prime(Q, q)))-δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end end @testset "factorize with QR" begin From 78c556f5c51e7076bb329e7b7a8c57d0aa467d9d Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 17 Nov 2022 13:23:53 -0600 Subject: [PATCH 09/90] Fix qr overload disambiguation problem in julia 1.6 --- NDTensors/src/blocksparse/linearalgebra.jl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 511e694904..63324a2532 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -296,11 +296,10 @@ function LinearAlgebra.eigen( return D, V, Spectrum(d, truncerr) end -#QR a block sparse Rank 2 tensor. -# +# QR a block sparse Rank 2 tensor. # This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qr(T::BlockSparseTensor{ElT,2,StoreT,IndsT}; kwargs...) where {ElT,StoreT,IndsT} +function LinearAlgebra.qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} # getting total number of blocks nnzblocksT = nnzblocks(T) From 401eef34f32e9a0ad73b2129bc1988e474603521 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 17 Nov 2022 17:12:46 -0600 Subject: [PATCH 10/90] Add flux checks for block sparse qr tests --- test/decomp.jl | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index 68e51165ac..0d4fd8d88c 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -81,14 +81,19 @@ using ITensors, LinearAlgebra, Test [ 0, 1, 2, 3 ] - l = dag(Index(QN("Sz", 0) => 3; tags="l")) + expected_Qflux=[QN() ,QN("Sz",0),QN("Sz", 2),QN("Sz",0)] + expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",-2),QN()] + l = dag(Index(QN("Sz", 0) => 1,QN("Sz", 1) => 1,QN("Sz", -1) => 1; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") - r = Index(QN("Sz", 0) => 3; tags="r") + r = Index(QN("Sz", 0) => 1,QN("Sz", 1) => 1,QN("Sz", -1) => 1; tags="r") A = randomITensor(l, s, r) + @test flux(A)==QN("Sz", 0) Ainds = inds(A) Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 3 - ninds + 1 + @test flux(Q)==expected_Qflux[ninds+1] + @test flux(R)==expected_Rflux[ninds+1] @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. 
# Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. @@ -100,14 +105,19 @@ using ITensors, LinearAlgebra, Test [ 0, 1, 2, 3, 4 ] + expected_Qflux=[QN() ,QN("Sz",0),QN("Sz", 2),QN("Sz",0),QN("Sz",0)] + expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",-2),QN("Sz",0),QN()] l = dag(Index(QN("Sz", 0) => 3; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") A = randomITensor(l, s, dag(s'), r) + @test flux(A)==QN("Sz", 0) Ainds = inds(A) Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 4 - ninds + 1 + @test flux(Q)==expected_Qflux[ninds+1] + @test flux(R)==expected_Rflux[ninds+1] @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. @@ -127,9 +137,12 @@ using ITensors, LinearAlgebra, Test H = MPO(ampo, sites; splitblocks=false) for n in 1:(N - 1) W = H[n] + @test flux(W)==QN("Sz", 0) ilr = filterinds(W; tags="l=$n")[1] ilq = noncommoninds(W, ilr) Q, R, q = qr(W, ilq) + @test flux(Q)==QN("Sz", 4) + @test flux(R)==QN("Sz",-4) @test W ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. From c6486eb890b3bf72705654c89554c0078941da59 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 18 Nov 2022 11:06:33 -0600 Subject: [PATCH 11/90] Implement RQ decomposition. --- NDTensors/src/exports.jl | 5 +- NDTensors/src/linearalgebra.jl | 89 ++++++++++++++++++- NDTensors/test/linearalgebra.jl | 17 ++++ src/exports.jl | 3 +- src/tensor_operations/matrix_decomposition.jl | 47 ++++++++++ test/decomp.jl | 16 ++++ 6 files changed, 174 insertions(+), 3 deletions(-) diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index e5bdfba1bb..fc724dd54e 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -76,4 +76,7 @@ export tensor, inds, ind, - store + store, + + # linearalgebra.jl + rq \ No newline at end of file diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 9d79aaebe3..7fcab6cd92 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -398,11 +398,98 @@ function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,Ind q = dim(q) < dim(r) ? sim(q) : sim(r) Qinds = IndsT((ind(T, 1), q)) Rinds = IndsT((q, ind(T, 2))) - Q = tensor(Dense(vec(Matrix(QM))), Qinds) + Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided R = tensor(Dense(vec(RM)), Rinds) return Q, R end +# +# Uses kwargs:positive to decide which rq method to call. +# +function rq(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} + if get(kwargs, :positive, false) + RM, QM = rq_positive(matrix(T)) + else + RM, QM = rq(matrix(T)) + end + + # Make the new indices to go onto Q and R + r, q = inds(T) + q = dim(q) < dim(r) ? sim(q) : sim(r) + Qinds = IndsT((q,ind(T, 2))) + Linds = IndsT((ind(T, 1),q)) + Q = NDTensors.tensor(NDTensors.Dense(vec(Matrix(QM))), Qinds) #Q was strided + R = NDTensors.tensor(NDTensors.Dense(vec(RM)), Linds) + return R, Q +end + +# +# Just flip signs between Q and R to get all the diagonals of R >=0. +# For rectangular M the indexing for "diagonal" is non-trivial. 
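+# For nr>nc the diagonal entry of column r sits at R[r+dr,r] with dr=nr-nc, so we
+# flip column r of R together with row r of Q, which leaves the product R*Q unchanged.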
+# +function rq_positive(M::AbstractMatrix) + R, sparseQ = rq(M) + Q = convert(Matrix, sparseQ) + nr,nc = size(R) + dr=nr>nc ? nr-nc : 0 #diag is shifted down by dr if nr>nc + for r in 1:nr + if r<=nc && real(R[r+dr, r]) < 0.0 + R[1:r+dr, r] *= -1 + Q[r,:] *= -1 + end + end + return (R, Q) +end + +# +# Lapack replaces A with Q & R carefully packed together. So here we just copy a +# before letting lapack overwirte it. +# +function rq(A::AbstractMatrix{T}; kwargs...) where T + Base.require_one_based_indexing(A) + AA = similar(A, LinearAlgebra._qreltype(T), size(A)) + copyto!(AA, A) + return rq!(AA; kwargs...) +end + +rq!(A::AbstractMatrix) = rq!(A) + +# +# This is where the low level call to lapack actually occurs. Most of the work is +# about unpacking Q and R from the A matrix. +# +function rq!(A::StridedMatrix{<:LAPACK.BlasFloat}) + tau=similar(A, Base.min(size(A)...)) + x=LAPACK.gerqf!(A, tau) + + # Unpack R from the lower portion of A, before orgql! mangles it! + nr,nc=size(A) + mn=Base.min(nr,nc) + R=similar(A,(nr,mn)) + for c in 1:mn + for r in 1:c+nr-mn + R[r,c]=A[r,c+nc-mn] + end + for r in c+1+nr-mn:nr + R[r,c]=0.0 + end + end + # + # If nr>nc we need shift the orth vectors from the bottom of Q up to top before + # unpacking the reflectors. + # + if mn Date: Fri, 18 Nov 2022 12:39:48 -0600 Subject: [PATCH 12/90] Add rq decomp for block sparse matrices --- NDTensors/src/blocksparse/linearalgebra.jl | 94 ++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 63324a2532..1b24f23361 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -296,6 +296,100 @@ function LinearAlgebra.eigen( return D, V, Spectrum(d, truncerr) end +# QR a block sparse Rank 2 tensor. +# This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb +# +function rq(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} + + # getting total number of blocks + nnzblocksT = nnzblocks(T) + nzblocksT = nzblocks(T) + + Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) + Rs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) + + for (jj, b) in enumerate(eachnzblock(T)) + blockT = blockview(T, b) + RQb = rq(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387 + + if (isnothing(RQb)) + return nothing + end + + R, Q = RQb + Qs[jj] = Q + Rs[jj] = R + end + + nb1_lt_nb2 = ( + nblocks(T)[1] < nblocks(T)[2] || + (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) + ) + + # setting the left index of the Q isometry, this should be + # the smaller index of the two indices of of T + qindr = ind(T, 2) + if nb1_lt_nb2 + qindl = sim(ind(T, 1)) + else + qindl = sim(ind(T, 2)) + end + + # can qindl have more blocks than T? 
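+  # Yes it can: qindl is a copy of one of T's indices and may carry blocks
+  # holding no data, so trim it down to one block per non-zero block of T.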
+ if nblocks(qindl) > nnzblocksT + resize!(qindl, nnzblocksT) + end + + for n in 1:nnzblocksT + q_dim_red = minimum(dims(Rs[n])) + NDTensors.setblockdim!(qindl, q_dim_red, n) + end + + # correcting the direction of the arrow + # if one have to be corrected the other one + # should also be corrected + if (dir(qindl) != dir(qindr)) + qindl = dag(qindl) + end + + indsQ = setindex(inds(T), dag(qindl), 1) + indsR = setindex(inds(T), qindl, 2) + + nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) + nzblocksR = Vector{Block{2}}(undef, nnzblocksT) + + for n in 1:nnzblocksT + blockT = nzblocksT[n] + + blockR = (blockT[1], UInt(n)) + nzblocksR[n] = blockR + + blockQ = (UInt(n), blockT[2]) + nzblocksQ[n] = blockQ + end + + Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) + R = BlockSparseTensor(ElT, undef, nzblocksR, indsR) + + for n in 1:nnzblocksT + Qb, Rb = Qs[n], Rs[n] + blockQ = nzblocksQ[n] + blockR = nzblocksR[n] + + if VERSION < v"1.5" + # In v1.3 and v1.4 of Julia, Ub has + # a very complicated view wrapper that + # can't be handled efficiently + Qb = copy(Qb) + Rb = copy(Vb) + end + + blockview(Q, blockQ) .= Qb + blockview(R, blockR) .= Rb + end + + return R, Q +end # QR a block sparse Rank 2 tensor. # This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # From 34248276046f5e68f8055af2666936b863d8d91e Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 18 Nov 2022 12:40:19 -0600 Subject: [PATCH 13/90] Add test for positive=true and rq decomp --- test/decomp.jl | 86 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 69 insertions(+), 17 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index d74e79a081..db2af573dc 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -49,7 +49,7 @@ using ITensors, LinearAlgebra, Test ) end - @testset "QR dense on MPS tensor with all possible collections on Q,R" for ninds in + @testset "QR/RQ dense on MPS tensor with all possible collections on Q,R" for ninds in [0, 1, 2, 3] l = Index(5, "l") s = Index(2, "s") @@ -61,9 +61,15 @@ using ITensors, LinearAlgebra, Test @test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + R, Q, q = rq(A, Ainds[1:ninds]) + @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(Q)) == 3 - ninds + 1 + @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end - @testset "QR dense on MP0 tensor with all possible collections on Q,R" for ninds in + @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [0, 1, 2, 3, 4] l = Index(5, "l") s = Index(2, "s") @@ -75,24 +81,15 @@ using ITensors, LinearAlgebra, Test @test length(inds(R)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - end - - @testset "RQ dense on MPS tensor with all possible collections on R,Q" begin - #for ninds in [0, 1, 2, 3] - - l = Index(5, "l") - s = Index(2, "s") - r = Index(10, "r") - A = randomITensor(l, s, r) - #Ainds = inds(A) - R, Q, q = rq(A, l) - # @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. - # @test length(inds(R)) == 3 - ninds + 1 + + R, Q, q = rq(A, Ainds[1:ninds]) + @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. 
+ @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end - @testset "QR block sparse on MPS tensor with all possible collections on Q,R" for ninds in + @testset "QR/RQ block sparse on MPS tensor with all possible collections on Q,R" for ninds in [ 0, 1, 2, 3 ] @@ -114,9 +111,19 @@ using ITensors, LinearAlgebra, Test # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + expected_Rflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] + expected_Qflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] + R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(Q)) == 3 - ninds + 1 + @test flux(Q)==expected_Qflux[ninds+1] + @test flux(R)==expected_Rflux[ninds+1] + @test A ≈ Q * R atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end + - @testset "QR block sparse on MPO tensor with all possible collections on Q,R" for ninds in + @testset "QR/RQ block sparse on MPO tensor with all possible collections on Q,R" for ninds in [ 0, 1, 2, 3, 4 ] @@ -138,6 +145,45 @@ using ITensors, LinearAlgebra, Test # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + expected_Qflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] + expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] + R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(Q)) == 4 - ninds + 1 + @test flux(Q)==expected_Qflux[ninds+1] + @test flux(R)==expected_Rflux[ninds+1] + @test A ≈ Q * R atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + end + + @testset "QR/RQ dense with positive R" begin + l = Index(5, "l") + s = Index(2, "s") + r = Index(10, "r") + A = randomITensor(l, s, s', r) + Q, R, q = qr(A, l,s,s';positive=true) + @test min(diag(R)...)>0.0 + @test A ≈ Q * R atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + R, Q, q = rq(A, r;positive=true) + @test min(diag(R)...)>0.0 + @test A ≈ Q * R atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + end + + @testset "QR/RQ block sparse with positive R" begin + l = dag(Index(QN("Sz", 0) => 3; tags="l")) + s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") + r = Index(QN("Sz", 0) => 3; tags="r") + A = randomITensor(l, s, dag(s'), r) + Q, R, q = qr(A, l,s,s';positive=true) + @test min(diag(R)...)>0.0 + @test A ≈ Q * R atol = 1e-13 + R, Q, q = rq(A, r;positive=true) + @test min(diag(R)...)>0.0 + @test A ≈ Q * R atol = 1e-13 end @testset "QR Heisenberg MPO tensors" begin @@ -163,6 +209,12 @@ using ITensors, LinearAlgebra, Test # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
# @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + R, Q, q = rq(W, ilr) + @test flux(Q)==QN("Sz",0) + @test flux(R)==QN("Sz",0) + @test W ≈ Q * R atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end end From ecd89317779756fca7d7849e0feaa42722bd8e0c Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 18 Nov 2022 16:12:10 -0600 Subject: [PATCH 14/90] Implement QL/LQ decompositions --- src/exports.jl | 4 +- src/tensor_operations/matrix_decomposition.jl | 37 +++++++++++++++++-- test/decomp.jl | 33 ++++++++++++++--- 3 files changed, 63 insertions(+), 11 deletions(-) diff --git a/src/exports.jl b/src/exports.jl index 1feff7618c..59359aadac 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -34,12 +34,14 @@ export # argsdict/argsdict.jl argsdict, - # decomp.jl + # tensor_operations/matrix_decomposition.jl eigen, factorize, polar, qr, rq, + lq, + ql, svd, diag, diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 5108d33131..06c6730080 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -379,16 +379,20 @@ function add_trivial_index(A::ITensor, Linds, Rinds) end function remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) - if !isnothing(vαl) #should have only dummy + qr,Link + if !isnothing(vαl) Q *= dag(vαl) end - if !isnothing(vαr) #should have only dummy + qr,Link + if !isnothing(vαr) R *= dag(vαr) end return Q, R end +#Force users to knowingly ask for zero indices using qr(A,()) syntax qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) +rq(A::ITensor; kwargs...) = error(noinds_error_message("rq")) +lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) +ql(A::ITensor; kwargs...) = error(noinds_error_message("ql")) function qr(A::ITensor, Linds...; kwargs...) qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R @@ -449,10 +453,9 @@ function qr(A::ITensor, Linds...; kwargs...) return Q, R, q end -rq(A::ITensor; kwargs...) = error(noinds_error_message("rq")) function rq(A::ITensor, Linds...; kwargs...) - qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R + qtag::TagSet = get(kwargs, :tags, "Link,rq") #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) lre = isempty(Lis) || isempty(Ris) @@ -497,6 +500,32 @@ function rq(A::ITensor, Linds...; kwargs...) return R, Q, q end +function lq(A::ITensor, Linds...; kwargs...) + Q, L, q = qr(A, uniqueinds(A, Linds...)) + # + # fix up the tag name for the index between Q and R. + # + qtag::TagSet = get(kwargs, :tags, "Link,lq") #tag for new index between Q and R + settags!(Q, qtag, q) + settags!(L, qtag, q) + q = settags(q, qtag) + + return L, Q, q +end + +function ql(A::ITensor, Linds...; kwargs...) + Q, L, q = rq(A, uniqueinds(A, Linds...)) + # + # fix up the tag name for the index between Q and R. + # + qtag::TagSet = get(kwargs, :tags, "Link,ql") #tag for new index between Q and R + settags!(Q, qtag, q) + settags!(L, qtag, q) + q = settags(q, qtag) + + return L, Q, q +end + polar(A::ITensor; kwargs...) = error(noinds_error_message("polar")) # TODO: allow custom tags in internal indices? 
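For reference, the four factorizations now share one calling convention: the
first-named factor carries the indices you pass, the other factor picks up the
remaining indices, and the new link index is returned last. A minimal sketch
with illustrative indices (mirroring the tests below, not part of the diff):

    l, s, r = Index(5, "l"), Index(2, "s"), Index(5, "r")
    A = randomITensor(l, s, r)
    Q, R, q = qr(A, l, s) # Q spans (l, s, q); R spans (q, r)
    R, Q, q = rq(A, l, s) # R spans (l, s, q); Q spans (q, r)
    L, Q, q = lq(A, l, s) # L spans (l, s, q); Q spans (q, r)
    Q, L, q = ql(A, l, s) # Q spans (l, s, q); L spans (q, r)
    A ≈ Q * R             # reconstruction holds for each pair (Q * L for lq/ql)

In every case Q is the orthogonal factor, satisfying Q * dag(prime(Q, q)) ≈ δ(q, q').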
diff --git a/test/decomp.jl b/test/decomp.jl index db2af573dc..561f8b8f64 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -1,4 +1,5 @@ using ITensors, LinearAlgebra, Test +import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this? @testset "ITensor Decompositions" begin @testset "truncate!" begin @@ -49,8 +50,8 @@ using ITensors, LinearAlgebra, Test ) end - @testset "QR/RQ dense on MPS tensor with all possible collections on Q,R" for ninds in - [0, 1, 2, 3] + @testset "QR/RQ/QL/LQ decomp on MPS dense tensor with all possible collections on Q/R/L" for ninds in + [0,1,2,3] l = Index(5, "l") s = Index(2, "s") r = Index(10, "r") @@ -61,12 +62,32 @@ using ITensors, LinearAlgebra, Test @test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test q==commonind(Q,R) + @test hastags(q,"qr") R, Q, q = rq(A, Ainds[1:ninds]) - @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test q==commonind(Q,R) + @test hastags(q,"rq") + + L, Q, q = lq(A,Ainds[1:ninds]) + @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index. + @test length(inds(Q)) == 3 - ninds + 1 + @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test q==commonind(Q,L) + @test hastags(q,"lq") + + Q, L, q = ITensors.ql(A,Ainds[1:ninds]) + @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. + @test length(inds(L)) == 3 - ninds + 1 + @test A ≈ Q * L atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test q==commonind(Q,L) + @test hastags(q,"ql") end @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in @@ -83,7 +104,7 @@ using ITensors, LinearAlgebra, Test @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 R, Q, q = rq(A, Ainds[1:ninds]) - @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @@ -114,7 +135,7 @@ using ITensors, LinearAlgebra, Test expected_Rflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] expected_Qflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q)==expected_Qflux[ninds+1] @test flux(R)==expected_Rflux[ninds+1] @@ -149,7 +170,7 @@ using ITensors, LinearAlgebra, Test expected_Qflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(R)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. 
@test length(inds(Q)) == 4 - ninds + 1 @test flux(Q)==expected_Qflux[ninds+1] @test flux(R)==expected_Rflux[ninds+1] From b75f40d31a93891314f3b7769d0bef98aafdee33 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 19 Nov 2022 11:19:43 -0600 Subject: [PATCH 15/90] Add tests for ComplexF64 and upper /lower checks for R/L --- test/decomp.jl | 91 +++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 87 insertions(+), 4 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index 561f8b8f64..79b6b8d705 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -1,6 +1,77 @@ using ITensors, LinearAlgebra, Test +using Printf import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this? +#brute force method to control the default float display format. +Base.show(io::IO, f::Float64) = @printf(io, "%1.3f", f) + +# +# Decide of rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. +# +function is_upper(At::NDTensors.Tensor)::Bool + nr,nc=dims(At) + dc=Base.max(0,dim(nr)-dim(nc)) #column off set for rectangular matrices. + nzeros=0 + for i in CartesianIndices(At) + if i[1]>i[2]+dc + if abs(At[i])>0.0 #row>col is lower triangle + return false + else + nzeros+=1 + end + end + end + # + # Debug code: Make some noise if At is not a vector and we still found no zeros. + # + # if nzeros==0 && nr>1 && nc>1 + # @show nr nc dc At + # end + return true +end + +# +# A must be rank 2 +# +function is_upper(l::Index,A::ITensor, r::Index)::Bool + @assert length(inds(A))==2 + if inds(A) != IndexSet(l, r) + A = permute(A, l, r) + end + return is_upper(NDTensors.tensor(A)) +end + +# +# With left index specified +# +function is_upper(l::Index,A::ITensor)::Bool + other=noncommoninds(A,l) + if (length(other)==1) + return is_upper(l,A,other[1]) + else + # use combiner to gather all the "other" indices into one. + C=combiner(other...) + AC=A*C + return is_upper(l,AC,combinedind(C)) + end +end +is_lower(l::Index,A::ITensor)::Bool = is_upper(A,l) + +# +# With right index specified +# +function is_upper(A::ITensor,r::Index)::Bool + other=noncommoninds(A,r) + if (length(other)==1) + return is_upper(other[1],A,r) + else + C=combiner(other...) + AC=A*C + return is_upper(combinedind(C),AC,r) + end +end +is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) + @testset "ITensor Decompositions" begin @testset "truncate!" begin a = [0.1, 0.01, 1e-13] @@ -50,12 +121,11 @@ import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this ) end - @testset "QR/RQ/QL/LQ decomp on MPS dense tensor with all possible collections on Q/R/L" for ninds in - [0,1,2,3] + @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in [0,1,2,3], elt in [Float64,ComplexF64] l = Index(5, "l") s = Index(2, "s") - r = Index(10, "r") - A = randomITensor(l, s, r) + r = Index(5, "r") + A = randomITensor(elt,l, s, r) Ainds = inds(A) Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @@ -64,6 +134,9 @@ import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test q==commonind(Q,R) @test hastags(q,"qr") + if (length(inds(R))>1) + @test is_upper(q,R) #specify the left index + end R, Q, q = rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. 
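# Editor's note (added in review): the dc offset in is_upper above handles
# rectangular R. For nr > nc the diagonal is shifted down by nr - nc, so e.g. a
# hypothetical 4x2 upper-trapezoidal R only requires zeros where row > col + 2:
#
#   x x
#   x x
#   x x
#   0 x
#
# i.e. only entry (4,1) must vanish; everything on or above the shifted diagonal
# may be nonzero.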
@@ -72,6 +145,9 @@ import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test q==commonind(Q,R) @test hastags(q,"rq") + if (length(inds(R))>1) + @test is_upper(R,q) #specify the right index + end L, Q, q = lq(A,Ainds[1:ninds]) @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index. @@ -80,6 +156,9 @@ import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test q==commonind(Q,L) @test hastags(q,"lq") + if (length(inds(L))>1) + @test is_lower(L,q) #specify the right index + end Q, L, q = ITensors.ql(A,Ainds[1:ninds]) @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. @@ -88,6 +167,9 @@ import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test q==commonind(Q,L) @test hastags(q,"ql") + if (length(inds(L))>1) + @test is_lower(q,L) #specify the right index + end end @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in @@ -342,6 +424,7 @@ import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this @test blockdim(u, b) == blockdim(i, b) || blockdim(u, b) >= min_blockdim end end + end nothing From 93c391f5e80c0b247734e6d3190a233f2032d8f1 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 19 Nov 2022 13:37:52 -0600 Subject: [PATCH 16/90] Fix flakey fail of NDTensors unit tests --- NDTensors/test/linearalgebra.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 49eb5a5b9b..cb2fee6322 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -1,6 +1,7 @@ using NDTensors using LinearAlgebra using Test +import NDTensors: rq #rq is in exports.jl, so why the hell do we need this? @testset "random_orthog" begin n, m = 10, 4 From fd8546ce25fca783eecc3a6b9ef6d0ef2787ddd3 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 19 Nov 2022 14:34:00 -0600 Subject: [PATCH 17/90] Endless struggle to get julia to see symbols I *already* put in exports.jl! --- test/decomp.jl | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index 79b6b8d705..849d21df5c 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -1,9 +1,5 @@ using ITensors, LinearAlgebra, Test -using Printf -import ITensors: lq,ql #these are in exports.jl, so why the hell do we need this? - -#brute force method to control the default float display format. -Base.show(io::IO, f::Float64) = @printf(io, "%1.3f", f) +import ITensors: rq,lq,ql #these are in exports.jl, so why the hell do we need this? # # Decide of rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. 
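Editor's note (added in review; my explanation rather than the author's): at least
for `qr` and `lq` the explicit import lines complained about above are genuinely
needed. LinearAlgebra owns and exports those names, so once ITensors also exports
methods of the same spelling, `using ITensors, LinearAlgebra` leaves the bare names
ambiguous and Julia refuses to resolve them. PATCH 22 below fixes this properly by
extending LinearAlgebra.qr/LinearAlgebra.lq instead of shadowing them. Until then
the workaround is:

    using ITensors, LinearAlgebra
    import ITensors: rq, lq, ql   # or qualify at the call site: ITensors.lq(A, ...)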
From 3b4962e02ec8301b1bf6fddee90fb3ef592d8593 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Sat, 19 Nov 2022 15:03:40 -0600
Subject: [PATCH 18/90] Removed unused version of qr()

---
 NDTensors/src/dense/dense.jl | 20 --------------------
 1 file changed, 20 deletions(-)

diff --git a/NDTensors/src/dense/dense.jl b/NDTensors/src/dense/dense.jl
index d3634e0f37..e650b54de5 100644
--- a/NDTensors/src/dense/dense.jl
+++ b/NDTensors/src/dense/dense.jl
@@ -968,26 +968,6 @@ function LinearAlgebra.svd(
   return U, S, V, spec
 end
 
-# qr decomposition of an order-n tensor according to
-# positions Lpos and Rpos
-function LinearAlgebra.qr(
-  T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs...
-) where {N,IndsT,NL,NR}
-  M = permute_reshape(T, Lpos, Rpos)
-  QM, RM = qr(M; kwargs...)
-  q = ind(QM, 2)
-  r = ind(RM, 1)
-  # TODO: simplify this by permuting inds(T) by (Lpos,Rpos)
-  # then grab Linds,Rinds
-  Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
-  Qinds = push(Linds, r)
-  Q = reshape(QM, Qinds)
-  Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
-  Rinds = pushfirst(Rinds, r)
-  R = reshape(RM, Rinds)
-  return Q, R
-end
-
 # polar decomposition of an order-n tensor according to positions Lpos
 # and Rpos
 function polar(

From 3464d7ff8a203535b7b448f52c9d33e3b6eb8f66 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Sun, 20 Nov 2022 14:37:20 -0600
Subject: [PATCH 19/90] Implement rank reducing QR/RQ/QL/LQ

---
 NDTensors/src/linearalgebra.jl  | 89 +++++++++++++++++++++++++++++----
 NDTensors/test/linearalgebra.jl | 21 +++++++-
 test/decomp.jl                  | 83 ++++++++++++++++++++++++------
 3 files changed, 167 insertions(+), 26 deletions(-)

diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl
index 7fcab6cd92..24eca2e2a2 100644
--- a/NDTensors/src/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra.jl
@@ -383,6 +383,58 @@ function LinearAlgebra.eigen(
   V = complex(tensor(Dense(vec(VM)), Vinds))
   return D, V, spec
 end
+#
+# QR rank reduction helpers
+#
+function find_zero_rows(R::AbstractMatrix,eps::Float64)::Array{Bool} where {ElT,IndsT}
+  nr,nc=size(R)
+  zeros=falses(nr)
+  for r in 1:nr
+    s=0.0
+    for c in 1:nc
+      s=max(s,abs(R[r, c]))
+    end
+    zeros[r]= (s<=eps)
+  end
+  return zeros
+end
+
+#
+# Trim out zero rows of R within tolerance eps. Also trim the corresponding columns
+# of Q.
+#
+function trim_rows(R::AbstractMatrix,Q::AbstractMatrix,eps::Float64) where {ElT,IndsT}
+  zeros=find_zero_rows(R,eps)
+  num_zero_rows=sum(zeros)
+  if num_zero_rows==0
+    return R,Q
+  end
+  #@printf "Rank Reveal removing %4i rows with epsrr=%.1e\n" num_zero_rows eps
+  Rnr,Rnc=size(R)
+  Qnr,Qnc=size(Q)
+  #@assert Rnr==Qnc Q is strided so we can't assume this
+  R1nr=Rnr-num_zero_rows
+  T=eltype(R)
+  R1=Matrix{T}(undef,R1nr,Rnc)
+  Q1=Matrix{T}(undef,Qnr ,R1nr)
+  r1=1
+  for r in 1:Rnr
+    if zeros[r]==false
+      R1[r1,:]=R[r,:] #transfer row
+      Q1[:,r1]=Q[:,r] #transfer column
+      r1+=1 #next row in the rank-reduced matrices.
+    end #if zero
+  end #for r
+  return R1,Q1
+end
+#
+# Trim out zero columns of R within tolerance eps. Also trim the corresponding rows
+# of Q.
where {ElT,Ind else QM, RM = qr(matrix(T)) end + # + # Do row removal for rank revealing RQ + # + epsrr::Float64 = get(kwargs, :epsrr , -1.0) + if epsrr>=0.0 + RM,QM=trim_rows(RM,QM,epsrr) + end + # # Make the new indices to go onto Q and R - q, r = inds(T) - q = dim(q) < dim(r) ? sim(q) : sim(r) - Qinds = IndsT((ind(T, 1), q)) - Rinds = IndsT((q, ind(T, 2))) + # + IndexT=IndsT.parameters[1] + nq = IndexT(size(RM)[1]) #dim of the link index + Qinds = IndsT((ind(T, 1), nq)) + Rinds = IndsT((nq, ind(T, 2))) Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided R = tensor(Dense(vec(RM)), Rinds) return Q, R @@ -413,13 +474,23 @@ function rq(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} RM, QM = rq(matrix(T)) end + + # + # Do row removal for rank revealing RQ + # + epsrr::Float64 = get(kwargs, :epsrr , -1.0) + if epsrr>=0.0 + RM,QM=trim_columns(RM,QM,epsrr) + end + # # Make the new indices to go onto Q and R - r, q = inds(T) - q = dim(q) < dim(r) ? sim(q) : sim(r) - Qinds = IndsT((q,ind(T, 2))) - Linds = IndsT((ind(T, 1),q)) + # + IndexT=IndsT.parameters[1] + nq = IndexT(size(RM)[2]) #dim of the link index + Qinds = IndsT((nq,ind(T, 2))) + Rinds = IndsT((ind(T, 1),nq)) Q = NDTensors.tensor(NDTensors.Dense(vec(Matrix(QM))), Qinds) #Q was strided - R = NDTensors.tensor(NDTensors.Dense(vec(RM)), Linds) + R = NDTensors.tensor(NDTensors.Dense(vec(RM)), Rinds) return R, Q end diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index cb2fee6322..cc1836a22b 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -1,7 +1,6 @@ using NDTensors using LinearAlgebra using Test -import NDTensors: rq #rq is in exports.jl, so why the hell do we need this? @testset "random_orthog" begin n, m = 10, 4 @@ -38,4 +37,24 @@ end @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0,nm)) atol = 1e-13 end +@testset "Dense Rank revealing QR/RQ decomposition" begin + n, m = 4,8 + A = randomTensor(n, m) + # make some columns lineary dependent + A[2,:]=A[1,:]*1.1 + A[4,:]=A[1,:]*2.1 + Q,R=qr(A;epsrr=1e-12) + @test dim(Q,2)==n-2 #make 2 columns actually got removed. + @test dim(R,1)==n-2 #make 2 rows actually got removed. + @test A ≈ Q * R atol = 1e-12 + nm=dim(Q,2) + @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0,nm)) atol = 1e-12 + + R,Q=rq(A;epsrr=1e-12) + @test dim(Q,1)==n-2 #make 2 rows actually got removed. + @test dim(R,2)==n-2 #make 2 columns actually got removed. + @test A ≈ R * Q atol = 1e-12 + nm=dim(Q,1) + @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0,nm)) atol = 1e-12 +end nothing diff --git a/test/decomp.jl b/test/decomp.jl index 849d21df5c..652422baba 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -1,5 +1,4 @@ using ITensors, LinearAlgebra, Test -import ITensors: rq,lq,ql #these are in exports.jl, so why the hell do we need this? # # Decide of rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. @@ -12,17 +11,9 @@ function is_upper(At::NDTensors.Tensor)::Bool if i[1]>i[2]+dc if abs(At[i])>0.0 #row>col is lower triangle return false - else - nzeros+=1 end end end - # - # Debug code: Make some noise if At is not a vector and we still found no zeros. - # - # if nzeros==0 && nr>1 && nc>1 - # @show nr nc dc At - # end return true end @@ -68,6 +59,31 @@ function is_upper(A::ITensor,r::Index)::Bool end is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) +# +# Makes all columns lineary depenedent but scaled differently. +# +function rank_fix(A::ITensor, Linds...) 
+ Lis = commoninds(A, (Linds...)) + Ris = uniqueinds(A, Lis) + # + # Use combiners to render A down to a rank 2 tensor ready matrix QR routine. + # + CL, CR = combiner(Lis...), combiner(Ris...) + cL, cR = combinedind(CL), combinedind(CR) + AC = A * CR * CL + if inds(AC) != IndexSet(cL, cR) + AC = permute(AC, cL, cR) + end + At=tensor(AC) + nr,nc=dims(At) + @assert nc>=2 + for c in 2:nc + At[:,c]=At[:,1]*1.05^c + end + return itensor(At) * dag(CL) * dag(CR) +end + + @testset "ITensor Decompositions" begin @testset "truncate!" begin a = [0.1, 0.01, 1e-13] @@ -117,6 +133,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) ) end + # Julia 1.6 makes it very difficult to split the exceedingly long line of code. @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in [0,1,2,3], elt in [Float64,ComplexF64] l = Index(5, "l") s = Index(2, "s") @@ -134,7 +151,8 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) @test is_upper(q,R) #specify the left index end - R, Q, q = rq(A, Ainds[1:ninds]) + #Julia 1.6 seems to be very erratic about seeing exported symbols like rq. + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -168,6 +186,38 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) end end + # Julia 1.6 makes it very difficult to split the exceedingly long line of code. + @testset "Rank revealing QR/RQ/QL/LQ decomp on MPS dense $elt tensor" for ninds in [1,2,3], elt in [Float64,ComplexF64] + l = Index(5, "l") + s = Index(2, "s") + r = Index(5, "r") + A = randomITensor(elt,l, s, s',r) + + Ainds = inds(A) + A=rank_fix(A,Ainds[1:ninds]) #make two sets of column linear dependent on column 1. + Q, R, q = qr(A, Ainds[1:ninds];epsrr=1e-12) #calling qr(A) triggers not supported error. + @test dim(q)==1 + @test A ≈ Q * R atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + R, Q, q = ITensors.rq(A, Ainds[1:ninds];epsrr=1e-12) + @test dim(q)==1 + @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + L, Q, q = lq(A,Ainds[1:ninds];epsrr=1e-12) + @test dim(q)==1 + @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + Q, L, q = ITensors.ql(A,Ainds[1:ninds];epsrr=1e-12) + @test dim(q)==1 + @test A ≈ Q * L atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + end + + @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [0, 1, 2, 3, 4] l = Index(5, "l") @@ -181,7 +231,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = rq(A, Ainds[1:ninds]) + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -212,7 +262,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 expected_Rflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] expected_Qflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] - R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. 
+ R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q)==expected_Qflux[ninds+1] @@ -247,7 +297,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) expected_Qflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] - R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test flux(Q)==expected_Qflux[ninds+1] @@ -266,7 +316,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) @test min(diag(R)...)>0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = rq(A, r;positive=true) + R, Q, q = ITensors.rq(A, r;positive=true) @test min(diag(R)...)>0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @@ -280,7 +330,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) Q, R, q = qr(A, l,s,s';positive=true) @test min(diag(R)...)>0.0 @test A ≈ Q * R atol = 1e-13 - R, Q, q = rq(A, r;positive=true) + R, Q, q = ITensors.rq(A, r;positive=true) @test min(diag(R)...)>0.0 @test A ≈ Q * R atol = 1e-13 end @@ -309,7 +359,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - R, Q, q = rq(W, ilr) + R, Q, q = ITensors.rq(W, ilr) @test flux(Q)==QN("Sz",0) @test flux(R)==QN("Sz",0) @test W ≈ Q * R atol = 1e-13 @@ -421,6 +471,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) end end + end nothing From 2a2391e211e4e8ee21b71d87106edf2816e87fd5 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sun, 20 Nov 2022 14:39:04 -0600 Subject: [PATCH 20/90] Pass through kwargs for LQ/QL functions --- src/tensor_operations/matrix_decomposition.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 06c6730080..eeb698951a 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -501,7 +501,7 @@ function rq(A::ITensor, Linds...; kwargs...) end function lq(A::ITensor, Linds...; kwargs...) - Q, L, q = qr(A, uniqueinds(A, Linds...)) + Q, L, q = qr(A, uniqueinds(A, Linds...);kwargs...) # # fix up the tag name for the index between Q and R. # @@ -514,7 +514,7 @@ function lq(A::ITensor, Linds...; kwargs...) end function ql(A::ITensor, Linds...; kwargs...) - Q, L, q = rq(A, uniqueinds(A, Linds...)) + Q, L, q = rq(A, uniqueinds(A, Linds...);kwargs...) # # fix up the tag name for the index between Q and R. 
# From b9dbda70b58b80c673b09beaed02a1a866148417 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sun, 20 Nov 2022 14:46:14 -0600 Subject: [PATCH 21/90] Run the formatter --- NDTensors/src/exports.jl | 2 +- NDTensors/src/linearalgebra.jl | 48 ++--- NDTensors/test/linearalgebra.jl | 16 +- src/exports.jl | 2 +- src/tensor_operations/matrix_decomposition.jl | 12 +- test/decomp.jl | 188 +++++++++--------- 6 files changed, 135 insertions(+), 133 deletions(-) diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index fc724dd54e..190d78de2b 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -79,4 +79,4 @@ export store, # linearalgebra.jl - rq \ No newline at end of file + rq diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 7fcab6cd92..7ab93e19c1 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -412,12 +412,12 @@ function rq(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} else RM, QM = rq(matrix(T)) end - + # Make the new indices to go onto Q and R r, q = inds(T) q = dim(q) < dim(r) ? sim(q) : sim(r) - Qinds = IndsT((q,ind(T, 2))) - Linds = IndsT((ind(T, 1),q)) + Qinds = IndsT((q, ind(T, 2))) + Linds = IndsT((ind(T, 1), q)) Q = NDTensors.tensor(NDTensors.Dense(vec(Matrix(QM))), Qinds) #Q was strided R = NDTensors.tensor(NDTensors.Dense(vec(RM)), Linds) return R, Q @@ -430,12 +430,12 @@ end function rq_positive(M::AbstractMatrix) R, sparseQ = rq(M) Q = convert(Matrix, sparseQ) - nr,nc = size(R) - dr=nr>nc ? nr-nc : 0 #diag is shifted down by dr if nr>nc + nr, nc = size(R) + dr = nr > nc ? nr - nc : 0 #diag is shifted down by dr if nr>nc for r in 1:nr - if r<=nc && real(R[r+dr, r]) < 0.0 - R[1:r+dr, r] *= -1 - Q[r,:] *= -1 + if r <= nc && real(R[r + dr, r]) < 0.0 + R[1:(r + dr), r] *= -1 + Q[r, :] *= -1 end end return (R, Q) @@ -445,7 +445,7 @@ end # Lapack replaces A with Q & R carefully packed together. So here we just copy a # before letting lapack overwirte it. # -function rq(A::AbstractMatrix{T}; kwargs...) where T +function rq(A::AbstractMatrix{T}; kwargs...) where {T} Base.require_one_based_indexing(A) AA = similar(A, LinearAlgebra._qreltype(T), size(A)) copyto!(AA, A) @@ -459,35 +459,35 @@ rq!(A::AbstractMatrix) = rq!(A) # about unpacking Q and R from the A matrix. # function rq!(A::StridedMatrix{<:LAPACK.BlasFloat}) - tau=similar(A, Base.min(size(A)...)) - x=LAPACK.gerqf!(A, tau) - + tau = similar(A, Base.min(size(A)...)) + x = LAPACK.gerqf!(A, tau) + # Unpack R from the lower portion of A, before orgql! mangles it! - nr,nc=size(A) - mn=Base.min(nr,nc) - R=similar(A,(nr,mn)) + nr, nc = size(A) + mn = Base.min(nr, nc) + R = similar(A, (nr, mn)) for c in 1:mn - for r in 1:c+nr-mn - R[r,c]=A[r,c+nc-mn] + for r in 1:(c + nr - mn) + R[r, c] = A[r, c + nc - mn] end - for r in c+1+nr-mn:nr - R[r,c]=0.0 + for r in (c + 1 + nr - mn):nr + R[r, c] = 0.0 end end # # If nr>nc we need shift the orth vectors from the bottom of Q up to top before # unpacking the reflectors. 
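# Editor's gloss (added in review; assumes standard LAPACK gerqf semantics, which
# the patch does not spell out): gerqf! returns with the upper-trapezoidal R packed
# against the right edge of A and the Householder reflectors that define Q stored
# in the remaining entries plus tau. R therefore has to be copied out first (the
# unpack loop above), because generating the explicit rows of Q afterwards
# overwrites A and destroys the packed R.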
# - if mni[2]+dc - if abs(At[i])>0.0 #row>col is lower triangle + if i[1] > i[2] + dc + if abs(At[i]) > 0.0 #row>col is lower triangle return false else - nzeros+=1 + nzeros += 1 end end end @@ -29,8 +28,8 @@ end # # A must be rank 2 # -function is_upper(l::Index,A::ITensor, r::Index)::Bool - @assert length(inds(A))==2 +function is_upper(l::Index, A::ITensor, r::Index)::Bool + @assert length(inds(A)) == 2 if inds(A) != IndexSet(l, r) A = permute(A, l, r) end @@ -40,33 +39,33 @@ end # # With left index specified # -function is_upper(l::Index,A::ITensor)::Bool - other=noncommoninds(A,l) - if (length(other)==1) - return is_upper(l,A,other[1]) +function is_upper(l::Index, A::ITensor)::Bool + other = noncommoninds(A, l) + if (length(other) == 1) + return is_upper(l, A, other[1]) else # use combiner to gather all the "other" indices into one. - C=combiner(other...) - AC=A*C - return is_upper(l,AC,combinedind(C)) + C = combiner(other...) + AC = A * C + return is_upper(l, AC, combinedind(C)) end end -is_lower(l::Index,A::ITensor)::Bool = is_upper(A,l) +is_lower(l::Index, A::ITensor)::Bool = is_upper(A, l) # # With right index specified # -function is_upper(A::ITensor,r::Index)::Bool - other=noncommoninds(A,r) - if (length(other)==1) - return is_upper(other[1],A,r) +function is_upper(A::ITensor, r::Index)::Bool + other = noncommoninds(A, r) + if (length(other) == 1) + return is_upper(other[1], A, r) else - C=combiner(other...) - AC=A*C - return is_upper(combinedind(C),AC,r) + C = combiner(other...) + AC = A * C + return is_upper(combinedind(C), AC, r) end end -is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) +is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) @testset "ITensor Decompositions" begin @testset "truncate!" begin @@ -117,59 +116,65 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) ) end - @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in [0,1,2,3], elt in [Float64,ComplexF64] + @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in + [ + 0, 1, 2, 3 + ], + elt in [Float64, ComplexF64] + l = Index(5, "l") s = Index(2, "s") r = Index(5, "r") - A = randomITensor(elt,l, s, r) + A = randomITensor(elt, l, s, r) Ainds = inds(A) Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q==commonind(Q,R) - @test hastags(q,"qr") - if (length(inds(R))>1) - @test is_upper(q,R) #specify the left index + @test q == commonind(Q, R) + @test hastags(q, "qr") + if (length(inds(R)) > 1) + @test is_upper(q, R) #specify the left index end - R, Q, q = rq(A, Ainds[1:ninds]) + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q==commonind(Q,R) - @test hastags(q,"rq") - if (length(inds(R))>1) - @test is_upper(R,q) #specify the right index + @test q == commonind(Q, R) + @test hastags(q, "rq") + if (length(inds(R)) > 1) + @test is_upper(R, q) #specify the right index end - L, Q, q = lq(A,Ainds[1:ninds]) + L, Q, q = lq(A, Ainds[1:ninds]) @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index. 
@test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q==commonind(Q,L) - @test hastags(q,"lq") - if (length(inds(L))>1) - @test is_lower(L,q) #specify the right index + @test q == commonind(Q, L) + @test hastags(q, "lq") + if (length(inds(L)) > 1) + @test is_lower(L, q) #specify the right index end - Q, L, q = ITensors.ql(A,Ainds[1:ninds]) + Q, L, q = ITensors.ql(A, Ainds[1:ninds]) @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. @test length(inds(L)) == 3 - ninds + 1 - @test A ≈ Q * L atol = 1e-13 + @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q==commonind(Q,L) - @test hastags(q,"ql") - if (length(inds(L))>1) - @test is_lower(q,L) #specify the right index + @test q == commonind(Q, L) + @test hastags(q, "ql") + if (length(inds(L)) > 1) + @test is_lower(q, L) #specify the right index end end - @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in - [0, 1, 2, 3, 4] + @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [ + 0, 1, 2, 3, 4 + ] l = Index(5, "l") s = Index(2, "s") r = Index(10, "r") @@ -181,7 +186,7 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = rq(A, Ainds[1:ninds]) + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -189,99 +194,97 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) end @testset "QR/RQ block sparse on MPS tensor with all possible collections on Q,R" for ninds in - [ + [ 0, 1, 2, 3 ] - expected_Qflux=[QN() ,QN("Sz",0),QN("Sz", 2),QN("Sz",0)] - expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",-2),QN()] - l = dag(Index(QN("Sz", 0) => 1,QN("Sz", 1) => 1,QN("Sz", -1) => 1; tags="l")) + expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0)] + expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -2), QN()] + l = dag(Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") - r = Index(QN("Sz", 0) => 1,QN("Sz", 1) => 1,QN("Sz", -1) => 1; tags="r") + r = Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="r") A = randomITensor(l, s, r) - @test flux(A)==QN("Sz", 0) + @test flux(A) == QN("Sz", 0) Ainds = inds(A) Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 3 - ninds + 1 - @test flux(Q)==expected_Qflux[ninds+1] - @test flux(R)==expected_Rflux[ninds+1] + @test flux(Q) == expected_Qflux[ninds + 1] + @test flux(R) == expected_Rflux[ninds + 1] @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Rflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] - expected_Qflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] - R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. 
+ expected_Rflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] + expected_Qflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 - @test flux(Q)==expected_Qflux[ninds+1] - @test flux(R)==expected_Rflux[ninds+1] + @test flux(Q) == expected_Qflux[ninds + 1] + @test flux(R) == expected_Rflux[ninds + 1] @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - @testset "QR/RQ block sparse on MPO tensor with all possible collections on Q,R" for ninds in - [ + [ 0, 1, 2, 3, 4 ] - expected_Qflux=[QN() ,QN("Sz",0),QN("Sz", 2),QN("Sz",0),QN("Sz",0)] - expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",-2),QN("Sz",0),QN()] + expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)] + expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN()] l = dag(Index(QN("Sz", 0) => 3; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") A = randomITensor(l, s, dag(s'), r) - @test flux(A)==QN("Sz", 0) + @test flux(A) == QN("Sz", 0) Ainds = inds(A) Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 4 - ninds + 1 - @test flux(Q)==expected_Qflux[ninds+1] - @test flux(R)==expected_Rflux[ninds+1] + @test flux(Q) == expected_Qflux[ninds + 1] + @test flux(R) == expected_Rflux[ninds + 1] @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Qflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] - expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] - R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] + expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 - @test flux(Q)==expected_Qflux[ninds+1] - @test flux(R)==expected_Rflux[ninds+1] + @test flux(Q) == expected_Qflux[ninds + 1] + @test flux(R) == expected_Rflux[ninds + 1] @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - end - + @testset "QR/RQ dense with positive R" begin l = Index(5, "l") s = Index(2, "s") r = Index(10, "r") A = randomITensor(l, s, s', r) - Q, R, q = qr(A, l,s,s';positive=true) - @test min(diag(R)...)>0.0 + Q, R, q = qr(A, l, s, s'; positive=true) + @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = rq(A, r;positive=true) - @test min(diag(R)...)>0.0 + R, Q, q = ITensors.rq(A, r; positive=true) + @test min(diag(R)...) 
> 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end - + @testset "QR/RQ block sparse with positive R" begin l = dag(Index(QN("Sz", 0) => 3; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") A = randomITensor(l, s, dag(s'), r) - Q, R, q = qr(A, l,s,s';positive=true) - @test min(diag(R)...)>0.0 + Q, R, q = qr(A, l, s, s'; positive=true) + @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 - R, Q, q = rq(A, r;positive=true) - @test min(diag(R)...)>0.0 + R, Q, q = ITensors.rq(A, r; positive=true) + @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 end @@ -297,21 +300,21 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) H = MPO(ampo, sites; splitblocks=false) for n in 1:(N - 1) W = H[n] - @test flux(W)==QN("Sz", 0) + @test flux(W) == QN("Sz", 0) ilr = filterinds(W; tags="l=$n")[1] ilq = noncommoninds(W, ilr) Q, R, q = qr(W, ilq) - @test flux(Q)==QN("Sz", 4) - @test flux(R)==QN("Sz",-4) + @test flux(Q) == QN("Sz", 4) + @test flux(R) == QN("Sz", -4) @test W ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - R, Q, q = rq(W, ilr) - @test flux(Q)==QN("Sz",0) - @test flux(R)==QN("Sz",0) + R, Q, q = ITensors.rq(W, ilr) + @test flux(Q) == QN("Sz", 0) + @test flux(R) == QN("Sz", 0) @test W ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end @@ -420,7 +423,6 @@ is_lower(A::ITensor,r::Index)::Bool = is_upper(r,A) @test blockdim(u, b) == blockdim(i, b) || blockdim(u, b) >= min_blockdim end end - end nothing From 0f8c09262ddf1c9a0d7411e34d925c74b226280c Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Mon, 21 Nov 2022 09:24:31 -0600 Subject: [PATCH 22/90] Try and fix lq symbol clash exposed in julia 1.8 --- src/tensor_operations/matrix_decomposition.jl | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index cbdc7d766d..1488741ce8 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -389,12 +389,14 @@ function remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) end #Force users to knowingly ask for zero indices using qr(A,()) syntax -qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) +LinearAlgebra.qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) rq(A::ITensor; kwargs...) = error(noinds_error_message("rq")) -lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) +LinearAlgebra.lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) ql(A::ITensor; kwargs...) = error(noinds_error_message("ql")) -function qr(A::ITensor, Linds...; kwargs...) +# qr is exported by the LinearAlgebra module so we need acknowledge that to avoid +# intermitent run time errors. +function LinearAlgebra.qr(A::ITensor, Linds...; kwargs...) qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) @@ -500,7 +502,9 @@ function rq(A::ITensor, Linds...; kwargs...) return R, Q, q end -function lq(A::ITensor, Linds...; kwargs...) 
+# lq is exported by the LinearAlgebra module so we need acknowledge that to avoid +# intermitent run time errors. +function LinearAlgebra.lq(A::ITensor, Linds...; kwargs...) Q, L, q = qr(A, uniqueinds(A, Linds...); kwargs...) # # fix up the tag name for the index between Q and R. From d0989cc1712b0f874224aa31578e2a254e70c8f0 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Mon, 21 Nov 2022 11:05:30 -0600 Subject: [PATCH 23/90] Merge from RQQLLQ branch --- NDTensors/test/linearalgebra.jl | 1 + test/decomp.jl | 17 ++++++++--------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 5902d7fc9f..ec19c08ddc 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -1,6 +1,7 @@ using NDTensors using LinearAlgebra using Test +import NDTensors: rq #Julia ignoring my export ... again! @testset "random_orthog" begin n, m = 10, 4 diff --git a/test/decomp.jl b/test/decomp.jl index 308bdce330..4f2ca02337 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -1,12 +1,11 @@ -using ITensors, LinearAlgebra, Test +using ITensors, NDTensors, LinearAlgebra, Test # # Decide of rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. # -function is_upper(At::NDTensors.Tensor)::Bool +function is_upper(At::Tensor)::Bool nr, nc = dims(At) dc = Base.max(0, dim(nr) - dim(nc)) #column off set for rectangular matrices. - nzeros = 0 for i in CartesianIndices(At) if i[1] > i[2] + dc if abs(At[i]) > 0.0 #row>col is lower triangle @@ -75,7 +74,7 @@ function rank_fix(A::ITensor, Linds...) AC = permute(AC, cL, cR) end At=tensor(AC) - nr,nc=dims(At) + nc=dim(At,2) @assert nc>=2 for c in 2:nc At[:,c]=At[:,1]*1.05^c @@ -194,24 +193,24 @@ end A = randomITensor(elt,l, s, s',r) Ainds = inds(A) - A=rank_fix(A,Ainds[1:ninds]) #make two sets of column linear dependent on column 1. + A=rank_fix(A,Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. Q, R, q = qr(A, Ainds[1:ninds];epsrr=1e-12) #calling qr(A) triggers not supported error. 
-    @test dim(q)==1
+    @test dim(q)==1 #check that we found rank==1
     @test A ≈ Q * R atol = 1e-13
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
 
     R, Q, q = ITensors.rq(A, Ainds[1:ninds];epsrr=1e-12)
-    @test dim(q)==1
+    @test dim(q)==1 #check that we found rank==1
     @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
 
     L, Q, q = lq(A,Ainds[1:ninds];epsrr=1e-12)
-    @test dim(q)==1
+    @test dim(q)==1 #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
 
     Q, L, q = ITensors.ql(A,Ainds[1:ninds];epsrr=1e-12)
-    @test dim(q)==1
+    @test dim(q)==1 #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13

From 990f8ca5e04474f9c2658a9772c50168cda253b0 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Tue, 29 Nov 2022 07:55:14 -0600
Subject: [PATCH 24/90] Run formatter on Rank Reveal code

---
 NDTensors/src/linearalgebra.jl  | 75 ++++++++++++++++-----------------
 NDTensors/test/linearalgebra.jl | 30 ++++++-------
 2 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl
index c5cc73acaa..0b5a80c543 100644
--- a/NDTensors/src/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra.jl
@@ -386,15 +386,15 @@ end
 #
 # QR rank reduction helpers
 #
-function find_zero_rows(R::AbstractMatrix,eps::Float64)::Array{Bool} where {ElT,IndsT}
-  nr,nc=size(R)
-  zeros=falses(nr)
+function find_zero_rows(R::AbstractMatrix, eps::Float64)::Array{Bool} where {ElT,IndsT}
+  nr, nc = size(R)
+  zeros = falses(nr)
   for r in 1:nr
-    s=0.0
+    s = 0.0
     for c in 1:nc
-      s=max(s,abs(R[r, c]))
+      s = max(s, abs(R[r, c]))
     end
-    zeros[r]= (s<=eps)
+    zeros[r] = (s <= eps)
   end
   return zeros
 end
@@ -403,37 +403,37 @@ end
 # Trim out zero rows of R within tolerance eps. Also trim the corresponding columns
 # of Q.
 #
-function trim_rows(R::AbstractMatrix,Q::AbstractMatrix,eps::Float64) where {ElT,IndsT}
-  zeros=find_zero_rows(R,eps)
-  num_zero_rows=sum(zeros)
-  if num_zero_rows==0
-    return R,Q
+function trim_rows(R::AbstractMatrix, Q::AbstractMatrix, eps::Float64) where {ElT,IndsT}
+  zeros = find_zero_rows(R, eps)
+  num_zero_rows = sum(zeros)
+  if num_zero_rows == 0
+    return R, Q
   end
   #@printf "Rank Reveal removing %4i rows with epsrr=%.1e\n" num_zero_rows eps
-  Rnr,Rnc=size(R)
-  Qnr,Qnc=size(Q)
+  Rnr, Rnc = size(R)
+  Qnr, Qnc = size(Q)
   #@assert Rnr==Qnc Q is strided so we can't assume this
-  R1nr=Rnr-num_zero_rows
-  T=eltype(R)
-  R1=Matrix{T}(undef,R1nr,Rnc)
-  Q1=Matrix{T}(undef,Qnr ,R1nr)
-  r1=1
+  R1nr = Rnr - num_zero_rows
+  T = eltype(R)
+  R1 = Matrix{T}(undef, R1nr, Rnc)
+  Q1 = Matrix{T}(undef, Qnr, R1nr)
+  r1 = 1
   for r in 1:Rnr
-    if zeros[r]==false
-      R1[r1,:]=R[r,:] #transfer row
-      Q1[:,r1]=Q[:,r] #transfer column
-      r1+=1 #next row in the rank-reduced matrices.
+    if zeros[r] == false
+      R1[r1, :] = R[r, :] #transfer row
+      Q1[:, r1] = Q[:, r] #transfer column
+      r1 += 1 #next row in the rank-reduced matrices.
     end #if zero
   end #for r
-  return R1,Q1
+  return R1, Q1
 end
 #
 # Trim out zero columns of R within tolerance eps. Also trim the corresponding rows
# -function trim_columns(R::AbstractMatrix,Q::AbstractMatrix,eps::Float64) where {ElT,IndsT} - R,Q=trim_rows(transpose(R),transpose(Q),eps) - return transpose(R),transpose(Q) +function trim_columns(R::AbstractMatrix, Q::AbstractMatrix, eps::Float64) where {ElT,IndsT} + R, Q = trim_rows(transpose(R), transpose(Q), eps) + return transpose(R), transpose(Q) end function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} @@ -448,14 +448,14 @@ function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,Ind # # Do row removal for rank revealing RQ # - epsrr::Float64 = get(kwargs, :epsrr , -1.0) - if epsrr>=0.0 - RM,QM=trim_rows(RM,QM,epsrr) + epsrr::Float64 = get(kwargs, :epsrr, -1.0) + if epsrr >= 0.0 + RM, QM = trim_rows(RM, QM, epsrr) end # # Make the new indices to go onto Q and R # - IndexT=IndsT.parameters[1] + IndexT = IndsT.parameters[1] nq = IndexT(size(RM)[1]) #dim of the link index Qinds = IndsT((ind(T, 1), nq)) Rinds = IndsT((nq, ind(T, 2))) @@ -473,22 +473,21 @@ function rq(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} else RM, QM = rq(matrix(T)) end - - + # # Do row removal for rank revealing RQ # - epsrr::Float64 = get(kwargs, :epsrr , -1.0) - if epsrr>=0.0 - RM,QM=trim_columns(RM,QM,epsrr) + epsrr::Float64 = get(kwargs, :epsrr, -1.0) + if epsrr >= 0.0 + RM, QM = trim_columns(RM, QM, epsrr) end # # Make the new indices to go onto Q and R # - IndexT=IndsT.parameters[1] + IndexT = IndsT.parameters[1] nq = IndexT(size(RM)[2]) #dim of the link index - Qinds = IndsT((nq,ind(T, 2))) - Rinds = IndsT((ind(T, 1),nq)) + Qinds = IndsT((nq, ind(T, 2))) + Rinds = IndsT((ind(T, 1), nq)) Q = NDTensors.tensor(NDTensors.Dense(vec(Matrix(QM))), Qinds) #Q was strided R = NDTensors.tensor(NDTensors.Dense(vec(RM)), Rinds) return R, Q diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index ec19c08ddc..4dc4f73023 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -39,23 +39,23 @@ end end @testset "Dense Rank revealing QR/RQ decomposition" begin - n, m = 4,8 + n, m = 4, 8 A = randomTensor(n, m) # make some columns lineary dependent - A[2,:]=A[1,:]*1.1 - A[4,:]=A[1,:]*2.1 - Q,R=qr(A;epsrr=1e-12) - @test dim(Q,2)==n-2 #make 2 columns actually got removed. - @test dim(R,1)==n-2 #make 2 rows actually got removed. - @test A ≈ Q * R atol = 1e-12 - nm=dim(Q,2) - @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0,nm)) atol = 1e-12 + A[2, :] = A[1, :] * 1.1 + A[4, :] = A[1, :] * 2.1 + Q, R = qr(A; epsrr=1e-12) + @test dim(Q, 2) == n - 2 #make 2 columns actually got removed. + @test dim(R, 1) == n - 2 #make 2 rows actually got removed. + @test A ≈ Q * R atol = 1e-12 + nm = dim(Q, 2) + @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-12 - R,Q=rq(A;epsrr=1e-12) - @test dim(Q,1)==n-2 #make 2 rows actually got removed. - @test dim(R,2)==n-2 #make 2 columns actually got removed. - @test A ≈ R * Q atol = 1e-12 - nm=dim(Q,1) - @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0,nm)) atol = 1e-12 + R, Q = rq(A; epsrr=1e-12) + @test dim(Q, 1) == n - 2 #make 2 rows actually got removed. + @test dim(R, 2) == n - 2 #make 2 columns actually got removed. 
+ @test A ≈ R * Q atol = 1e-12 + nm = dim(Q, 1) + @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, nm)) atol = 1e-12 end nothing From 67d3594dc63cb87ae5a890d64368cc0b5b45e9f2 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 29 Nov 2022 07:56:10 -0600 Subject: [PATCH 25/90] Augment unit tests to check all index directions are QR decomp --- test/decomp.jl | 115 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 77 insertions(+), 38 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index 4f2ca02337..a1b0615007 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -73,15 +73,26 @@ function rank_fix(A::ITensor, Linds...) if inds(AC) != IndexSet(cL, cR) AC = permute(AC, cL, cR) end - At=tensor(AC) - nc=dim(At,2) - @assert nc>=2 + At = tensor(AC) + nc = dim(At, 2) + @assert nc >= 2 for c in 2:nc - At[:,c]=At[:,1]*1.05^c + At[:, c] = At[:, 1] * 1.05^c end return itensor(At) * dag(CL) * dag(CR) end +# +# verify all QN directions were preserved for A=Q*R decompositions. +# +function test_directions(A::ITensor, Q::ITensor, R::ITensor, q::Index) + for i in noncommoninds(Q, q) + @test dir(findinds(A; tags=tags(i), plev=plev(i))[1]) == dir(i) + end + for i in noncommoninds(R, q) + @test dir(findinds(A; tags=tags(i), plev=plev(i))[1]) == dir(i) + end +end @testset "ITensor Decompositions" begin @testset "truncate!" begin @@ -133,7 +144,12 @@ end end # Julia 1.6 makes it very difficult to split the exceedingly long line of code. - @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in [0,1,2,3], elt in [Float64,ComplexF64] + @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in + [ + 0, 1, 2, 3 + ], + elt in [Float64, ComplexF64] + l = Index(5, "l") s = Index(2, "s") r = Index(5, "r") @@ -151,7 +167,7 @@ end end #Julia 1.6 seems to be very erratic about seeing exported symbols like rq. - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -186,39 +202,41 @@ end end # Julia 1.6 makes it very difficult to split the exceedingly long line of code. - @testset "Rank revealing QR/RQ/QL/LQ decomp on MPS dense $elt tensor" for ninds in [1,2,3], elt in [Float64,ComplexF64] + @testset "Rank revealing QR/RQ/QL/LQ decomp on MPS dense $elt tensor" for ninds in + [1, 2, 3], + elt in [Float64, ComplexF64] + l = Index(5, "l") s = Index(2, "s") r = Index(5, "r") - A = randomITensor(elt,l, s, s',r) - + A = randomITensor(elt, l, s, s', r) + Ainds = inds(A) - A=rank_fix(A,Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. - Q, R, q = qr(A, Ainds[1:ninds];epsrr=1e-12) #calling qr(A) triggers not supported error. - @test dim(q)==1 #check that we found rank==1 + A = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. + Q, R, q = qr(A, Ainds[1:ninds]; epsrr=1e-12) #calling qr(A) triggers not supported error. 
+ @test dim(q) == 1 #check that we found rank==1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - - R, Q, q = ITensors.rq(A, Ainds[1:ninds];epsrr=1e-12) - @test dim(q)==1 #check that we found rank==1 + + R, Q, q = ITensors.rq(A, Ainds[1:ninds]; epsrr=1e-12) + @test dim(q) == 1 #check that we found rank==1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - - L, Q, q = lq(A,Ainds[1:ninds];epsrr=1e-12) - @test dim(q)==1 #check that we found rank==1 + + L, Q, q = lq(A, Ainds[1:ninds]; epsrr=1e-12) + @test dim(q) == 1 #check that we found rank==1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - Q, L, q = ITensors.ql(A,Ainds[1:ninds];epsrr=1e-12) - @test dim(q)==1 #check that we found rank==1 - @test A ≈ Q * L atol = 1e-13 + Q, L, q = ITensors.ql(A, Ainds[1:ninds]; epsrr=1e-12) + @test dim(q) == 1 #check that we found rank==1 + @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - end - - @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in - [0, 1, 2, 3, 4] + @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [ + 0, 1, 2, 3, 4 + ] l = Index(5, "l") s = Index(2, "s") r = Index(10, "r") @@ -230,7 +248,7 @@ end @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -254,18 +272,21 @@ end @test length(inds(R)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @test flux(R) == expected_Rflux[ninds + 1] + test_directions(A, Q, R, q) @test A ≈ Q * R atol = 1e-13 + # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Rflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] - expected_Qflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] + expected_Rflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] + expected_Qflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @test flux(R) == expected_Rflux[ninds + 1] + test_directions(A, Q, R, q) @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end @@ -287,19 +308,21 @@ end @test length(inds(R)) == 4 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @test flux(R) == expected_Rflux[ninds + 1] + test_directions(A, Q, R, q) @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
# @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Qflux=[QN() ,QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0)] - expected_Rflux=[QN("Sz",0),QN("Sz",0),QN("Sz",0),QN("Sz",0),QN()] + expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] + expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @test flux(R) == expected_Rflux[ninds + 1] + test_directions(A, Q, R, q) @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end @@ -313,8 +336,8 @@ end @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, r;positive=true) - @test min(diag(R)...)>0.0 + R, Q, q = ITensors.rq(A, r; positive=true) + @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end @@ -326,9 +349,11 @@ end A = randomITensor(l, s, dag(s'), r) Q, R, q = qr(A, l, s, s'; positive=true) @test min(diag(R)...) > 0.0 + test_directions(A, Q, R, q) @test A ≈ Q * R atol = 1e-13 - R, Q, q = ITensors.rq(A, r;positive=true) - @test min(diag(R)...)>0.0 + R, Q, q = ITensors.rq(A, r; positive=true) + @test min(diag(R)...) > 0.0 + test_directions(A, Q, R, q) @test A ≈ Q * R atol = 1e-13 end @@ -350,6 +375,7 @@ end Q, R, q = qr(W, ilq) @test flux(Q) == QN("Sz", 4) @test flux(R) == QN("Sz", -4) + test_directions(W, Q, R, q) @test W ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. @@ -357,9 +383,24 @@ end @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 R, Q, q = ITensors.rq(W, ilr) - @test flux(Q)==QN("Sz",0) - @test flux(R)==QN("Sz",0) + @test flux(Q) == QN("Sz", 0) + @test flux(R) == QN("Sz", 0) @test W ≈ Q * R atol = 1e-13 + test_directions(W, Q, R, q) + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + Q, L, q = ITensors.ql(W, ilq) + @test flux(Q) == QN("Sz", 0) + @test flux(L) == QN("Sz", 0) + @test W ≈ Q * L atol = 1e-13 + test_directions(W, Q, L, q) + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + L, Q, q = ITensors.lq(W, ilr) + @test flux(Q) == QN("Sz", 4) + @test flux(L) == QN("Sz", -4) + @test W ≈ Q * L atol = 1e-13 + test_directions(W, Q, L, q) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end end @@ -467,8 +508,6 @@ end @test blockdim(u, b) == blockdim(i, b) || blockdim(u, b) >= min_blockdim end end - - end nothing From ba0bfdde7031892e2139945503e7f075ae965271 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 30 Nov 2022 10:17:34 -0600 Subject: [PATCH 26/90] Fix QN direction of qx link for rq decomp. 
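Editor's note (added in review; my reading of the one-line fix below): for
QN-conserving tensors the new rq link must carry opposite arrows on the two
factors, i.e. Q takes qindl and R takes dag(qindl), so the pair still contracts
over the link and the fluxes split consistently. A minimal check, assuming the
rq added earlier in this series:

    using ITensors
    l = Index(QN("Sz", 0) => 2, QN("Sz", 1) => 1; tags="l")
    r = Index(QN("Sz", 0) => 2; tags="r")
    A = randomITensor(l, dag(r))
    R, Q, q = ITensors.rq(A, l)
    @assert A ≈ R * Q
    @assert flux(R) + flux(Q) == flux(A)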
--- NDTensors/src/blocksparse/linearalgebra.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 1b24f23361..dab375d69b 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -352,8 +352,8 @@ function rq(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} qindl = dag(qindl) end - indsQ = setindex(inds(T), dag(qindl), 1) - indsR = setindex(inds(T), qindl, 2) + indsQ = setindex(inds(T), qindl, 1) + indsR = setindex(inds(T), dag(qindl), 2) nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) nzblocksR = Vector{Block{2}}(undef, nnzblocksT) From 46b757113ff1e10b833f4744159fa5348eae179f Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 30 Nov 2022 10:43:53 -0600 Subject: [PATCH 27/90] Fix flux tests to reflect new QN dir fix for rq decomp --- test/decomp.jl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index a1b0615007..64ecd53691 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -279,8 +279,8 @@ end # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Rflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_Qflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + expected_Rflux = [QN(), QN("Sz", 2), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)] + expected_Qflux = [QN("Sz", 0), QN("Sz", -2), QN("Sz", -2), QN("Sz", 0), QN()] R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @@ -315,8 +315,8 @@ end # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN("Sz", 0)] + expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN()] R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. 
@test length(inds(Q)) == 4 - ninds + 1 @@ -383,15 +383,15 @@ end @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 R, Q, q = ITensors.rq(W, ilr) - @test flux(Q) == QN("Sz", 0) - @test flux(R) == QN("Sz", 0) + @test flux(Q) == QN("Sz", -4) + @test flux(R) == QN("Sz", 4) @test W ≈ Q * R atol = 1e-13 test_directions(W, Q, R, q) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 Q, L, q = ITensors.ql(W, ilq) - @test flux(Q) == QN("Sz", 0) - @test flux(L) == QN("Sz", 0) + @test flux(Q) == QN("Sz", -4) + @test flux(L) == QN("Sz", 4) @test W ≈ Q * L atol = 1e-13 test_directions(W, Q, L, q) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 From ce7512cd9b0b67f5c8d3897a0f5d17748d446ad4 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 2 Dec 2022 08:25:48 -0600 Subject: [PATCH 28/90] Fix flux tests to reflect new QN dir fix for rq decomp --- test/decomp.jl | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index f1ddba8324..85bc8efcb9 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -215,8 +215,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Rflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_Qflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + expected_Rflux = [QN(), QN("Sz", 2), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)] + expected_Qflux = [QN("Sz", 0), QN("Sz", -2), QN("Sz", -2), QN("Sz", 0), QN()] R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @@ -249,8 +249,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN("Sz", 0)] + expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN()] R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. 
@test length(inds(Q)) == 4 - ninds + 1 @@ -313,10 +313,24 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 R, Q, q = ITensors.rq(W, ilr) - @test flux(Q) == QN("Sz", 0) - @test flux(R) == QN("Sz", 0) + @test flux(Q) == QN("Sz", -4) + @test flux(R) == QN("Sz", 4) @test W ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + Q, L, q = ITensors.ql(W, ilq) + @test flux(Q) == QN("Sz", -4) + @test flux(L) == QN("Sz", 4) + @test W ≈ Q * L atol = 1e-13 + test_directions(W, Q, L, q) + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + L, Q, q = ITensors.lq(W, ilr) + @test flux(Q) == QN("Sz", 4) + @test flux(L) == QN("Sz", -4) + @test W ≈ Q * L atol = 1e-13 + test_directions(W, Q, L, q) + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end end From ad726dcbae8a645cce6e59bd50a8c78d2e137318 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 2 Dec 2022 08:26:35 -0600 Subject: [PATCH 29/90] Remove direction tests --- test/decomp.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/decomp.jl b/test/decomp.jl index 85bc8efcb9..c1213b8509 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -322,14 +322,12 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) @test flux(Q) == QN("Sz", -4) @test flux(L) == QN("Sz", 4) @test W ≈ Q * L atol = 1e-13 - test_directions(W, Q, L, q) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 L, Q, q = ITensors.lq(W, ilr) @test flux(Q) == QN("Sz", 4) @test flux(L) == QN("Sz", -4) @test W ≈ Q * L atol = 1e-13 - test_directions(W, Q, L, q) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end end From d05220da92e12441290bcf55e37172a5f70a37fc Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 7 Dec 2022 12:43:16 -0600 Subject: [PATCH 30/90] Improvements based on Matt's code review Clean up LinearAlgebra overloads Clean usage of permute() Remove julia<=v1.5 specifc code --- NDTensors/src/blocksparse/linearalgebra.jl | 18 +----------------- NDTensors/src/imports.jl | 2 +- NDTensors/src/linearalgebra.jl | 2 +- NDTensors/test/linearalgebra.jl | 1 - src/imports.jl | 1 + src/tensor_operations/matrix_decomposition.jl | 16 ++++++---------- 6 files changed, 10 insertions(+), 30 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index dab375d69b..334becf9e0 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -376,14 +376,6 @@ function rq(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} blockQ = nzblocksQ[n] blockR = nzblocksR[n] - if VERSION < v"1.5" - # In v1.3 and v1.4 of Julia, Ub has - # a very complicated view wrapper that - # can't be handled efficiently - Qb = copy(Qb) - Rb = copy(Vb) - end - blockview(Q, blockQ) .= Qb blockview(R, blockR) .= Rb end @@ -393,7 +385,7 @@ end # QR a block sparse Rank 2 tensor. # This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function LinearAlgebra.qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} +function qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} # getting total number of blocks nnzblocksT = nnzblocks(T) @@ -471,14 +463,6 @@ function LinearAlgebra.qr(T::BlockSparseTensor{ElT,2}; kwargs...) 
where {ElT} blockQ = nzblocksQ[n] blockR = nzblocksR[n] - if VERSION < v"1.5" - # In v1.3 and v1.4 of Julia, Ub has - # a very complicated view wrapper that - # can't be handled efficiently - Qb = copy(Qb) - Rb = copy(Vb) - end - blockview(Q, blockQ) .= Qb blockview(R, blockR) .= Rb end diff --git a/NDTensors/src/imports.jl b/NDTensors/src/imports.jl index 673e7b25cd..78365708d3 100644 --- a/NDTensors/src/imports.jl +++ b/NDTensors/src/imports.jl @@ -54,6 +54,6 @@ import Base.Broadcast: Broadcasted, BroadcastStyle import Adapt: adapt_structure, adapt_storage -import LinearAlgebra: diag, exp, norm +import LinearAlgebra: diag, exp, norm, qr import TupleTools: isperm diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 7ab93e19c1..fce30564fb 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -384,7 +384,7 @@ function LinearAlgebra.eigen( return D, V, spec end -function LinearAlgebra.qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} +function qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} positive = get(kwargs, :positive, false) # TODO: just call qr on T directly (make sure # that is fast) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index f16ce01160..4777ef21b8 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -1,7 +1,6 @@ using NDTensors using LinearAlgebra using Test -import NDTensors: rq #rq is in exports.jl, so why the hell do we need this? @testset "random_orthog" begin n, m = 10, 4 diff --git a/src/imports.jl b/src/imports.jl index a26c4857e8..019f052958 100644 --- a/src/imports.jl +++ b/src/imports.jl @@ -99,6 +99,7 @@ import LinearAlgebra: factorize, ishermitian, lmul!, + lq, mul!, norm, normalize, diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 1488741ce8..08ee2f9f5d 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -389,14 +389,14 @@ function remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) end #Force users to knowingly ask for zero indices using qr(A,()) syntax -LinearAlgebra.qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) +qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) rq(A::ITensor; kwargs...) = error(noinds_error_message("rq")) -LinearAlgebra.lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) +lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) ql(A::ITensor; kwargs...) = error(noinds_error_message("ql")) # qr is exported by the LinearAlgebra module so we need acknowledge that to avoid # intermitent run time errors. -function LinearAlgebra.qr(A::ITensor, Linds...; kwargs...) +function qr(A::ITensor, Linds...; kwargs...) qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) @@ -416,9 +416,7 @@ function LinearAlgebra.qr(A::ITensor, Linds...; kwargs...) # # Make sure we don't accidentally pass the transpose into the matrix qr routine. # - if inds(AC) != IndexSet(cL, cR) - AC = permute(AC, cL, cR) - end + AC = permute(AC, cL, cR; allow_alias=true) # qr the matrix. QT, RT = qr(tensor(AC); kwargs...) @@ -476,9 +474,7 @@ function rq(A::ITensor, Linds...; kwargs...) # # Make sure we don't accidentally pass the transpose into the matrix qr routine. 
# - if inds(AC) != IndexSet(cL, cR) - AC = permute(AC, cL, cR) - end + AC = permute(AC, cL, cR; allow_alias=true) # qr the matrix. RT, QT = NDTensors.rq(tensor(AC); kwargs...) @@ -504,7 +500,7 @@ end # lq is exported by the LinearAlgebra module so we need acknowledge that to avoid # intermitent run time errors. -function LinearAlgebra.lq(A::ITensor, Linds...; kwargs...) +function lq(A::ITensor, Linds...; kwargs...) Q, L, q = qr(A, uniqueinds(A, Linds...); kwargs...) # # fix up the tag name for the index between Q and R. From 70594466730539da53b91835593aa3516f3d8d5d Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 14 Jan 2023 11:29:49 -0600 Subject: [PATCH 31/90] Use map for testing zero rows. --- NDTensors/src/linearalgebra.jl | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index a74ce97cd9..aed12c4c51 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -386,17 +386,9 @@ end # # QR rank reduction helpers # -function find_zero_rows(R::AbstractMatrix, eps::Float64)::Array{Bool} where {ElT,IndsT} +function find_zero_rows(R::AbstractMatrix, rr_cutoff::Float64)::Array{Bool} where {ElT,IndsT} nr, nc = size(R) - zeros = falses(nr) - for r in 1:nr - s = 0.0 - for c in 1:nc - s = max(s, abs(R[r, c])) - end - zeros[r] = (s <= eps) - end - return zeros + return map((r)->(maximum(abs.(R[r,1:nc])) <= rr_cutoff),1:nr ) end # From 405014e04aeff7f07737ac97eb8b55836361c7d8 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 14 Jan 2023 11:30:36 -0600 Subject: [PATCH 32/90] Change from using epsrr to rr_cutoff for zero row threshold. --- NDTensors/src/linearalgebra.jl | 26 +++++++-------- NDTensors/test/linearalgebra.jl | 4 +-- src/imports.jl | 1 + src/tensor_operations/matrix_decomposition.jl | 5 +-- test/decomp.jl | 32 +++++++++---------- 5 files changed, 33 insertions(+), 35 deletions(-) diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index aed12c4c51..4460dfb04a 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -392,16 +392,16 @@ function find_zero_rows(R::AbstractMatrix, rr_cutoff::Float64)::Array{Bool} wher end # -# Trim out zero rows of R within tolerance eps. Also trim the corresponding columns +# Trim out zero rows of R within tolerance rr_cutoff. Also trim the corresponding columns # of Q. # -function trim_rows(R::AbstractMatrix, Q::AbstractMatrix, eps::Float64) where {ElT,IndsT} - zeros = find_zero_rows(R, eps) +function trim_rows(R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64) where {ElT,IndsT} + zeros = find_zero_rows(R, rr_cutoff) num_zero_rows = sum(zeros) if num_zero_rows == 0 return R, Q end - #@printf "Rank Reveal removing %4i rows with epsrr=%.1e\n" num_zero_rows eps + #@printf "Rank Reveal removing %4i rows with rr_cutoff=%.1e\n" num_zero_rows rr_cutoff Rnr, Rnc = size(R) Qnr, Qnc = size(Q) #@assert Rnr==Qnc Q is strided to we can't asume this @@ -420,11 +420,11 @@ function trim_rows(R::AbstractMatrix, Q::AbstractMatrix, eps::Float64) where {El return R1, Q1 end # -# Trim out zero columnss of R within tolerance eps. Also trim the corresponding rows +# Trim out zero columnss of R within tolerance rr_cutoff. Also trim the corresponding rows # of Q. 
# -function trim_columns(R::AbstractMatrix, Q::AbstractMatrix, eps::Float64) where {ElT,IndsT} - R, Q = trim_rows(transpose(R), transpose(Q), eps) +function trim_columns(R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64) where {ElT,IndsT} + R, Q = trim_rows(transpose(R), transpose(Q), rr_cutoff) return transpose(R), transpose(Q) end @@ -440,9 +440,9 @@ function qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} # # Do row removal for rank revealing RQ # - epsrr::Float64 = get(kwargs, :epsrr, -1.0) - if epsrr >= 0.0 - RM, QM = trim_rows(RM, QM, epsrr) + rr_cutoff::Float64 = get(kwargs, :rr_cutoff, -1.0) + if rr_cutoff >= 0.0 + RM, QM = trim_rows(RM, QM, rr_cutoff) end # # Make the new indices to go onto Q and R @@ -469,9 +469,9 @@ function rq(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} # # Do row removal for rank revealing RQ # - epsrr::Float64 = get(kwargs, :epsrr, -1.0) - if epsrr >= 0.0 - RM, QM = trim_columns(RM, QM, epsrr) + rr_cutoff::Float64 = get(kwargs, :rr_cutoff, -1.0) + if rr_cutoff >= 0.0 + RM, QM = trim_columns(RM, QM, rr_cutoff) end # # Make the new indices to go onto Q and R diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 463950af81..7f6fb67381 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -43,14 +43,14 @@ end # make some columns lineary dependent A[2, :] = A[1, :] * 1.1 A[4, :] = A[1, :] * 2.1 - Q, R = qr(A; epsrr=1e-12) + Q, R = qr(A; rr_cutoff=1e-12) @test dim(Q, 2) == n - 2 #make 2 columns actually got removed. @test dim(R, 1) == n - 2 #make 2 rows actually got removed. @test A ≈ Q * R atol = 1e-12 nm = dim(Q, 2) @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-12 - R, Q = rq(A; epsrr=1e-12) + R, Q = rq(A; rr_cutoff=1e-12) @test dim(Q, 1) == n - 2 #make 2 rows actually got removed. @test dim(R, 2) == n - 2 #make 2 columns actually got removed. @test A ≈ R * Q atol = 1e-12 diff --git a/src/imports.jl b/src/imports.jl index 019f052958..4801cfd5f7 100644 --- a/src/imports.jl +++ b/src/imports.jl @@ -123,6 +123,7 @@ using ITensors.NDTensors: eachdiagblock, fill!!, randn!!, + rq, single_precision, timer diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 08ee2f9f5d..ebdc051b06 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -388,14 +388,13 @@ function remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) return Q, R end +import NDTensors: rq #Force users to knowingly ask for zero indices using qr(A,()) syntax qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) rq(A::ITensor; kwargs...) = error(noinds_error_message("rq")) lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) ql(A::ITensor; kwargs...) = error(noinds_error_message("ql")) -# qr is exported by the LinearAlgebra module so we need acknowledge that to avoid -# intermitent run time errors. function qr(A::ITensor, Linds...; kwargs...) qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) @@ -498,8 +497,6 @@ function rq(A::ITensor, Linds...; kwargs...) return R, Q, q end -# lq is exported by the LinearAlgebra module so we need acknowledge that to avoid -# intermitent run time errors. function lq(A::ITensor, Linds...; kwargs...) Q, L, q = qr(A, uniqueinds(A, Linds...); kwargs...) 
# diff --git a/test/decomp.jl b/test/decomp.jl index 839973f894..e25d5749fc 100644 --- a/test/decomp.jl +++ b/test/decomp.jl @@ -167,7 +167,7 @@ end end #Julia 1.6 seems to be very erratic about seeing exported symbols like rq. - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) + R, Q, q = rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -189,7 +189,7 @@ end @test is_lower(L, q) #specify the right index end - Q, L, q = ITensors.ql(A, Ainds[1:ninds]) + Q, L, q = ql(A, Ainds[1:ninds]) @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. @test length(inds(L)) == 3 - ninds + 1 @test A ≈ Q * L atol = 1e-13 @@ -213,22 +213,22 @@ end Ainds = inds(A) A = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. - Q, R, q = qr(A, Ainds[1:ninds]; epsrr=1e-12) #calling qr(A) triggers not supported error. + Q, R, q = qr(A, Ainds[1:ninds]; rr_cutoff=1e-12) #calling qr(A) triggers not supported error. @test dim(q) == 1 #check that we found rank==1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]; epsrr=1e-12) + R, Q, q = rq(A, Ainds[1:ninds]; rr_cutoff=1e-12) @test dim(q) == 1 #check that we found rank==1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - L, Q, q = lq(A, Ainds[1:ninds]; epsrr=1e-12) + L, Q, q = lq(A, Ainds[1:ninds]; rr_cutoff=1e-12) @test dim(q) == 1 #check that we found rank==1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - Q, L, q = ITensors.ql(A, Ainds[1:ninds]; epsrr=1e-12) + Q, L, q = ql(A, Ainds[1:ninds]; rr_cutoff=1e-12) @test dim(q) == 1 #check that we found rank==1 @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @@ -248,7 +248,7 @@ end @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) + R, Q, q = rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -281,7 +281,7 @@ end @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 expected_Rflux = [QN(), QN("Sz", 2), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)] expected_Qflux = [QN("Sz", 0), QN("Sz", -2), QN("Sz", -2), QN("Sz", 0), QN()] - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -317,7 +317,7 @@ end expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN("Sz", 0)] expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN()] - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -336,7 +336,7 @@ end @test min(diag(R)...) 
> 0.0
   @test A ≈ Q * R atol = 1e-13
   @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-  R, Q, q = ITensors.rq(A, r; positive=true)
+  R, Q, q = rq(A, r; positive=true)
   @test min(diag(R)...) > 0.0
   @test A ≈ Q * R atol = 1e-13
   @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
@@ -351,7 +351,7 @@ end
   @test min(diag(R)...) > 0.0
   test_directions(A, Q, R, q)
   @test A ≈ Q * R atol = 1e-13
-  R, Q, q = ITensors.rq(A, r; positive=true)
+  R, Q, q = rq(A, r; positive=true)
   @test min(diag(R)...) > 0.0
   test_directions(A, Q, R, q)
   @test A ≈ Q * R atol = 1e-13
@@ -382,34 +382,34 @@ end
   #  @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

-  R, Q, q = ITensors.rq(W, ilr)
+  R, Q, q = rq(W, ilr)
   @test flux(Q) == QN("Sz", -4)
   @test flux(R) == QN("Sz", 4)
   @test W ≈ Q * R atol = 1e-13
   test_directions(W, Q, R, q)
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

-  Q, L, q = ITensors.ql(W, ilq)
+  Q, L, q = ql(W, ilq)
   @test flux(Q) == QN("Sz", -4)
   @test flux(L) == QN("Sz", 4)
   @test W ≈ Q * L atol = 1e-13
   test_directions(W, Q, L, q)
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

-  L, Q, q = ITensors.lq(W, ilr)
+  L, Q, q = lq(W, ilr)
   @test flux(Q) == QN("Sz", 4)
   @test flux(L) == QN("Sz", -4)
   @test W ≈ Q * L atol = 1e-13
   test_directions(W, Q, L, q)
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

-  Q, L, q = ITensors.ql(W, ilq)
+  Q, L, q = ql(W, ilq)
   @test flux(Q) == QN("Sz", -4)
   @test flux(L) == QN("Sz", 4)
   @test W ≈ Q * L atol = 1e-13
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

-  L, Q, q = ITensors.lq(W, ilr)
+  L, Q, q = lq(W, ilr)
   @test flux(Q) == QN("Sz", 4)
   @test flux(L) == QN("Sz", -4)
   @test W ≈ Q * L atol = 1e-13

From e937732ca705968b62d99781c907bcc6e004e101 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 2 Mar 2023 11:00:54 -0600
Subject: [PATCH 33/90] Handle changes to similar() function interface.

---
 NDTensors/src/linearalgebra.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl
index 893714377d..61206cf2fb 100644
--- a/NDTensors/src/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra.jl
@@ -463,7 +463,7 @@ rq!(A::AbstractMatrix) = rq!(A)
 # about unpacking Q and R from the A matrix.
 #
 function rq!(A::StridedMatrix{<:LAPACK.BlasFloat})
-  tau = similar(A, Base.min(size(A)...))
+  tau = Base.similar(A, Base.min(size(A)...))
   x = LAPACK.gerqf!(A, tau)

   # Unpack R from the lower portion of A, before orgql! mangles it!

From 01d86f4133140af1d04a464653342e78dc7f1de7 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 2 Mar 2023 11:17:23 -0600
Subject: [PATCH 34/90] Add QR/RQ code to ensure all flux is moved onto R

When QR/RQ decomposing a zero flux tensor, this effectively ensures that
there is zero flux between Q and R. One can think of this as a reduction in
the gauge freedom between Q and R, making the results a little more
deterministic. Unit tests are also updated to check that the zero flux was
achieved.
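
For example (a sketch mirroring the updated unit tests, not new API; W and
ilq are the names used in the tests, with W any zero flux MPO tensor and ilq
the index collection handed to qr):

    Q, R, q = qr(W, ilq)
    @assert flux(Q) == QN("Sz", 0) # every block of Q comes out flux free
    @assert flux(R) == flux(W)     # R absorbs the total flux of W
    @assert W ≈ Q * R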
--- src/tensor_operations/matrix_decomposition.jl | 46 ++++++++++++------- test/base/test_decomp.jl | 32 ++++++------- 2 files changed, 46 insertions(+), 32 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 84b633b1e1..68916e87f2 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -428,20 +428,20 @@ function qr(A::ITensor, Linds...; kwargs...) AC = permute(AC, cL, cR; allow_alias=true) # qr the matrix. QT, RT = qr(tensor(AC); kwargs...) - - # We need a use case that fails without this code. - # correcting the fluxes of the two tensors, such that Q has 0 flux for all blocks + # + # correct the fluxes of the two tensors, such that Q has 0 flux for all blocks # and R has the total flux of the system - # if hasqns(AC) - # for b in nzblocks(QT) - # i1 = inds(QT)[1] - # i2 = inds(QT)[2] - # r1 = inds(RT)[1] - # newqn = -dir(i2) * flux(i1 => Block(b[1])) - # ITensors.setblockqn!(i2, newqn, b[2]) - # ITensors.setblockqn!(r1, newqn, b[2]) - # end - # end + # + if hasqns(AC) + for b in nzblocks(QT) + i1 = inds(QT)[1] + i2 = inds(QT)[2] + r1 = inds(RT)[1] + newqn = -dir(i2) * flux(i1 => Block(b[1])) + ITensors.setblockqn!(i2, newqn, b[2]) + ITensors.setblockqn!(r1, newqn, b[2]) + end + end # # Undo the combine oepration, to recover all tensor indices. @@ -486,13 +486,27 @@ function rq(A::ITensor, Linds...; kwargs...) AC = permute(AC, cL, cR; allow_alias=true) # qr the matrix. RT, QT = NDTensors.rq(tensor(AC); kwargs...) - + # + # correct the fluxes of the two tensors, such that Q has 0 flux for all blocks + # and R has the total flux of the system + # + if hasqns(AC) + for b in nzblocks(QT) + + i1 = inds(QT)[1] + i2 = inds(QT)[2] + r2 = inds(RT)[2] + newqn = -dir(i1) * flux(i2 => Block(b[2])) + ITensors.setblockqn!(i1, newqn, b[1]) + ITensors.setblockqn!(r2, newqn, b[1]) + end + end # # Undo the combine oepration, to recover all tensor indices. # R, Q = itensor(RT) * dag(CL), itensor(QT) * dag(CR) - - # Conditionally remove dummy indices. + + # Conditionally remove dummy indices. if (lre) R, Q = remove_trivial_index(R, Q, vαl, vαr) end diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index c1213b8509..e05c7d572b 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -197,8 +197,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) [ 0, 1, 2, 3 ] - expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0)] - expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -2), QN()] + expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] + expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -0), QN()] l = dag(Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="r") @@ -215,8 +215,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
# @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-  expected_Rflux = [QN(), QN("Sz", 2), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)]
-  expected_Qflux = [QN("Sz", 0), QN("Sz", -2), QN("Sz", -2), QN("Sz", 0), QN()]
+  expected_Rflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)]
+  expected_Qflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()]
   R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
   @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
   @test length(inds(Q)) == 3 - ninds + 1
@@ -230,8 +230,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A)
   [
     0, 1, 2, 3, 4
   ]
-  expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)]
-  expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN()]
+  expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)]
+  expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()]
   l = dag(Index(QN("Sz", 0) => 3; tags="l"))
   s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
   r = Index(QN("Sz", 0) => 3; tags="r")
@@ -249,8 +249,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A)
   # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

-  expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN("Sz", 0)]
-  expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN()]
+  expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)]
+  expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()]
   R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
   @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
   @test length(inds(Q)) == 4 - ninds + 1
@@ -304,8 +304,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A)
   ilr = filterinds(W; tags="l=$n")[1]
   ilq = noncommoninds(W, ilr)
   Q, R, q = qr(W, ilq)
-  @test flux(Q) == QN("Sz", 4)
-  @test flux(R) == QN("Sz", -4)
+  @test flux(Q) == QN("Sz", 0) #qr should move all flux on W (0 in this case) onto R
+  @test flux(R) == QN("Sz", 0) #this effectively removes all flux between Q and R in this case.
   @test W ≈ Q * R atol = 1e-13
   # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
   # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
@@ -313,20 +313,20 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A)
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

   R, Q, q = ITensors.rq(W, ilr)
-  @test flux(Q) == QN("Sz", -4)
-  @test flux(R) == QN("Sz", 4)
+  @test flux(Q) == QN("Sz", 0)
+  @test flux(R) == QN("Sz", 0)
   @test W ≈ Q * R atol = 1e-13
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

   Q, L, q = ITensors.ql(W, ilq)
-  @test flux(Q) == QN("Sz", -4)
-  @test flux(L) == QN("Sz", 4)
+  @test flux(Q) == QN("Sz", 0)
+  @test flux(L) == QN("Sz", 0)
   @test W ≈ Q * L atol = 1e-13
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13

   L, Q, q = ITensors.lq(W, ilr)
-  @test flux(Q) == QN("Sz", 4)
-  @test flux(L) == QN("Sz", -4)
+  @test flux(Q) == QN("Sz", 0)
+  @test flux(L) == QN("Sz", 0)
   @test W ≈ Q * L atol = 1e-13
   @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
 end
 end

From 3ac4267e13a747af946ee49429aeb525a68802cd Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 2 Mar 2023 14:23:47 -0600
Subject: [PATCH 35/90] Implement all but one of Matt's code review
 recommendations

Remove NDTensor. and LinearAlgebra. qualifiers
Remove special-case code for Julia versions < 1.5
Clean up code for empty Lis/Ris
Remove usage of IndexSet
Use allow_alias when calling permute (fewer if statements)
Use out of place versions of settags
---
 NDTensors/src/blocksparse/linearalgebra.jl    |  4 +-
 src/tensor_operations/matrix_decomposition.jl | 55 +++++++------------
 2 files changed, 22 insertions(+), 37 deletions(-)

diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl
index 334becf9e0..7143af77fd 100644
--- a/NDTensors/src/blocksparse/linearalgebra.jl
+++ b/NDTensors/src/blocksparse/linearalgebra.jl
@@ -342,7 +342,7 @@ function rq(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT}

   for n in 1:nnzblocksT
     q_dim_red = minimum(dims(Rs[n]))
-    NDTensors.setblockdim!(qindl, q_dim_red, n)
+    setblockdim!(qindl, q_dim_red, n)
   end

   # correcting the direction of the arrow
@@ -428,7 +428,7 @@ function qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT}

   for n in 1:nnzblocksT
     q_dim_red = minimum(dims(Rs[n]))
-    NDTensors.setblockdim!(qindr, q_dim_red, n)
+    setblockdim!(qindr, q_dim_red, n)
   end

   # correcting the direction of the arrow
diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index 68916e87f2..6a55520c76 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -387,15 +387,10 @@ function add_trivial_index(A::ITensor, Linds, Rinds)
   return A, vαl, vαr, Linds, Rinds
 end

-function remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr)
-  if !isnothing(vαl)
-    Q *= dag(vαl)
-  end
-  if !isnothing(vαr)
-    R *= dag(vαr)
-  end
-  return Q, R
-end
+remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) = (Q*dag(vαl), R*dag(vαr))
+remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, vαr) = (Q, R*dag(vαr))
+remove_trivial_index(Q::ITensor, R::ITensor, vαl, ::Nothing) = (Q*dag(vαl), R)
+remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, ::Nothing)=(Q,R)

 #Force users to knowingly ask for zero indices using qr(A,()) syntax
 qr(A::ITensor; kwargs...) = error(noinds_error_message("qr"))
@@ -409,12 +404,9 @@ function qr(A::ITensor, Linds...; kwargs...)
qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) - lre = isempty(Lis) || isempty(Ris) - # make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never + # Make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never # be empty. A essentially becomes 1D after collection. - if (lre) - A, vαl, vαr, Lis, Ris = add_trivial_index(A, Lis, Ris) - end + A, vαl, vαr, Lis, Ris = add_trivial_index(A, Lis, Ris) # # Use combiners to render A down to a rank 2 tensor ready matrix QR routine. @@ -448,16 +440,14 @@ function qr(A::ITensor, Linds...; kwargs...) # Q, R = itensor(QT) * dag(CL), itensor(RT) * dag(CR) - # Conditionally remove dummy indices. - if (lre) - Q, R = remove_trivial_index(Q, R, vαl, vαr) - end + # Remove dummy indices. No-op if vαl and vαr are Nothing + Q, R = remove_trivial_index(Q, R, vαl, vαr) # # fix up the tag name for the index between Q and R. # q = commonind(Q, R) - settags!(Q, qtag, q) - settags!(R, qtag, q) + Q = settags(Q, qtag, q) + R = settags(R, qtag, q) q = settags(q, qtag) return Q, R, q @@ -467,13 +457,10 @@ function rq(A::ITensor, Linds...; kwargs...) qtag::TagSet = get(kwargs, :tags, "Link,rq") #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) - lre = isempty(Lis) || isempty(Ris) # make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never # be empty. A essentially becomes 1D after collection. - if (lre) - A, vαl, vαr, Lis, Ris = add_trivial_index(A, Lis, Ris) - end - + A, vαl, vαr, Lis, Ris = add_trivial_index(A, Lis, Ris) + # # Use combiners to render A down to a rank 2 tensor ready matrix QR routine. # @@ -506,16 +493,14 @@ function rq(A::ITensor, Linds...; kwargs...) # R, Q = itensor(RT) * dag(CL), itensor(QT) * dag(CR) - # Conditionally remove dummy indices. - if (lre) - R, Q = remove_trivial_index(R, Q, vαl, vαr) - end + # Conditionally remove dummy indices. + R, Q = remove_trivial_index(R, Q, vαl, vαr) # # fix up the tag name for the index between Q and R. # q = commonind(Q, R) - settags!(Q, qtag, q) - settags!(R, qtag, q) + Q = settags(Q, qtag, q) + R = settags(R, qtag, q) q = settags(q, qtag) return R, Q, q @@ -529,8 +514,8 @@ function lq(A::ITensor, Linds...; kwargs...) # fix up the tag name for the index between Q and R. # qtag::TagSet = get(kwargs, :tags, "Link,lq") #tag for new index between Q and R - settags!(Q, qtag, q) - settags!(L, qtag, q) + Q = settags(Q, qtag, q) + L = settags(L, qtag, q) q = settags(q, qtag) return L, Q, q @@ -542,8 +527,8 @@ function ql(A::ITensor, Linds...; kwargs...) # fix up the tag name for the index between Q and R. 
# qtag::TagSet = get(kwargs, :tags, "Link,ql") #tag for new index between Q and R - settags!(Q, qtag, q) - settags!(L, qtag, q) + Q = settags(Q, qtag, q) + L = settags(L, qtag, q) q = settags(q, qtag) return L, Q, q From 0d0d120b99e564429a3616920906f83142fe86fe Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 2 Mar 2023 14:26:00 -0600 Subject: [PATCH 36/90] Run the formatter --- src/tensor_operations/matrix_decomposition.jl | 37 +++++++++---------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 6a55520c76..c2c9cc6e8e 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -387,10 +387,10 @@ function add_trivial_index(A::ITensor, Linds, Rinds) return A, vαl, vαr, Linds, Rinds end -remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) = (Q*dag(vαl), R*dag(vαr)) -remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, vαr) = (Q, R*dag(vαr)) -remove_trivial_index(Q::ITensor, R::ITensor, vαl, ::Nothing) = (Q*dag(vαl), R) -remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, ::Nothing)=(Q,R) +remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) = (Q * dag(vαl), R * dag(vαr)) +remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, vαr) = (Q, R * dag(vαr)) +remove_trivial_index(Q::ITensor, R::ITensor, vαl, ::Nothing) = (Q * dag(vαl), R) +remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, ::Nothing) = (Q, R) #Force users to knowingly ask for zero indices using qr(A,()) syntax qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) @@ -426,12 +426,12 @@ function qr(A::ITensor, Linds...; kwargs...) # if hasqns(AC) for b in nzblocks(QT) - i1 = inds(QT)[1] - i2 = inds(QT)[2] - r1 = inds(RT)[1] - newqn = -dir(i2) * flux(i1 => Block(b[1])) - ITensors.setblockqn!(i2, newqn, b[2]) - ITensors.setblockqn!(r1, newqn, b[2]) + i1 = inds(QT)[1] + i2 = inds(QT)[2] + r1 = inds(RT)[1] + newqn = -dir(i2) * flux(i1 => Block(b[1])) + ITensors.setblockqn!(i2, newqn, b[2]) + ITensors.setblockqn!(r1, newqn, b[2]) end end @@ -460,7 +460,7 @@ function rq(A::ITensor, Linds...; kwargs...) # make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never # be empty. A essentially becomes 1D after collection. A, vαl, vαr, Lis, Ris = add_trivial_index(A, Lis, Ris) - + # # Use combiners to render A down to a rank 2 tensor ready matrix QR routine. # @@ -479,20 +479,19 @@ function rq(A::ITensor, Linds...; kwargs...) # if hasqns(AC) for b in nzblocks(QT) - - i1 = inds(QT)[1] - i2 = inds(QT)[2] - r2 = inds(RT)[2] - newqn = -dir(i1) * flux(i2 => Block(b[2])) - ITensors.setblockqn!(i1, newqn, b[1]) - ITensors.setblockqn!(r2, newqn, b[1]) + i1 = inds(QT)[1] + i2 = inds(QT)[2] + r2 = inds(RT)[2] + newqn = -dir(i1) * flux(i2 => Block(b[2])) + ITensors.setblockqn!(i1, newqn, b[1]) + ITensors.setblockqn!(r2, newqn, b[1]) end end # # Undo the combine oepration, to recover all tensor indices. # R, Q = itensor(RT) * dag(CL), itensor(QT) * dag(CR) - + # Conditionally remove dummy indices. R, Q = remove_trivial_index(R, Q, vαl, vαr) # From 2a93b6ec7899382771e4635ed689e8528399f794 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 2 Mar 2023 15:10:13 -0600 Subject: [PATCH 37/90] Remove NDTensors. 
qualifiers --- NDTensors/src/linearalgebra.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 61206cf2fb..6e94d1f6da 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -422,8 +422,8 @@ function rq(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} q = dim(q) < dim(r) ? sim(q) : sim(r) Qinds = IndsT((q, ind(T, 2))) Linds = IndsT((ind(T, 1), q)) - Q = NDTensors.tensor(NDTensors.Dense(vec(Matrix(QM))), Qinds) #Q was strided - R = NDTensors.tensor(NDTensors.Dense(vec(RM)), Linds) + Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided + R = tensor(Dense(vec(RM)), Linds) return R, Q end From 1e8830da3d54a0243d432e91ef7a99729354165c Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 2 Mar 2023 15:11:07 -0600 Subject: [PATCH 38/90] Put keyword arguments directly in signatures --- NDTensors/src/linearalgebra.jl | 7 +++---- src/tensor_operations/matrix_decomposition.jl | 16 ++++++++-------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 6e94d1f6da..100d3f2c95 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -388,8 +388,7 @@ function LinearAlgebra.eigen( return D, V, spec end -function qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} - positive = get(kwargs, :positive, false) +function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} # TODO: just call qr on T directly (make sure # that is fast) if positive @@ -410,8 +409,8 @@ end # # Uses kwargs:positive to decide which rq method to call. # -function rq(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} - if get(kwargs, :positive, false) +function rq(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} + if positive RM, QM = rq_positive(matrix(T)) else RM, QM = rq(matrix(T)) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index c2c9cc6e8e..2da10747be 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -400,8 +400,8 @@ ql(A::ITensor; kwargs...) = error(noinds_error_message("ql")) # qr is exported by the LinearAlgebra module so we need acknowledge that to avoid # intermitent run time errors. -function qr(A::ITensor, Linds...; kwargs...) - qtag::TagSet = get(kwargs, :tags, "Link,qr") #tag for new index between Q and R +function qr(A::ITensor, Linds...; tags=ts"Link,qr", kwargs...) + qtag = TagSet(tags) #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) # Make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never @@ -453,8 +453,8 @@ function qr(A::ITensor, Linds...; kwargs...) return Q, R, q end -function rq(A::ITensor, Linds...; kwargs...) - qtag::TagSet = get(kwargs, :tags, "Link,rq") #tag for new index between Q and R +function rq(A::ITensor, Linds...; tags=ts"Link,rq", kwargs...) + qtag = TagSet(tags) #tag for new index between Q and R Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) # make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never @@ -507,12 +507,12 @@ end # lq is exported by the LinearAlgebra module so we need acknowledge that to avoid # intermitent run time errors. -function lq(A::ITensor, Linds...; kwargs...) +function lq(A::ITensor, Linds...; tags=ts"Link,lq", kwargs...) 
Q, L, q = qr(A, uniqueinds(A, Linds...); kwargs...) # # fix up the tag name for the index between Q and R. # - qtag::TagSet = get(kwargs, :tags, "Link,lq") #tag for new index between Q and R + qtag = TagSet(tags) Q = settags(Q, qtag, q) L = settags(L, qtag, q) q = settags(q, qtag) @@ -520,12 +520,12 @@ function lq(A::ITensor, Linds...; kwargs...) return L, Q, q end -function ql(A::ITensor, Linds...; kwargs...) +function ql(A::ITensor, Linds...; tags=ts"Link,ql", kwargs...) Q, L, q = rq(A, uniqueinds(A, Linds...); kwargs...) # # fix up the tag name for the index between Q and R. # - qtag::TagSet = get(kwargs, :tags, "Link,ql") #tag for new index between Q and R + qtag = TagSet(tags) Q = settags(Q, qtag, q) L = settags(L, qtag, q) q = settags(q, qtag) From 84a6f204decc6d95d700cddd90b6b6d0ef488126 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 3 Mar 2023 17:46:27 -0600 Subject: [PATCH 39/90] Fix flux requirements at the NDTensors level. FOllows the internal index creation of the SVD routine. --- NDTensors/src/blocksparse/linearalgebra.jl | 93 ++++++------------- src/tensor_operations/matrix_decomposition.jl | 30 +----- test/base/test_decomp.jl | 64 ++++++++++--- 3 files changed, 81 insertions(+), 106 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 7143af77fd..4943fb733e 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -321,39 +321,20 @@ function rq(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} Rs[jj] = R end - nb1_lt_nb2 = ( - nblocks(T)[1] < nblocks(T)[2] || - (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) - ) - - # setting the left index of the Q isometry, this should be - # the smaller index of the two indices of of T - qindr = ind(T, 2) - if nb1_lt_nb2 - qindl = sim(ind(T, 1)) - else - qindl = sim(ind(T, 2)) - end - - # can qindl have more blocks than T? - if nblocks(qindl) > nnzblocksT - resize!(qindl, nnzblocksT) - end - - for n in 1:nnzblocksT - q_dim_red = minimum(dims(Rs[n])) - setblockdim!(qindl, q_dim_red, n) - end - - # correcting the direction of the arrow - # if one have to be corrected the other one - # should also be corrected - if (dir(qindl) != dir(qindr)) - qindl = dag(qindl) + # + # Make the new index connecting R and Q + # + itl = ind(T, 1) #left index of T + irq = dag(sim(itl)) #start with similar to the left index of T + resize!(irq, nnzblocksT) #adjust the size to match the block count + for (n, blockT) in enumerate(nzblocksT) + Rdim = size(Rs[n], 2) + b2 = block(itl, blockT[1]) + setblock!(irq, resize(b2, Rdim), n) end - - indsQ = setindex(inds(T), qindl, 1) - indsR = setindex(inds(T), dag(qindl), 2) + + indsQ = setindex(inds(T), dag(irq), 1) #inds(Q)=(irq_dagger,inds(T)[2]) + indsR = setindex(inds(T), irq, 2) #inds(R)=(inds(T)[1],irq) nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) nzblocksR = Vector{Block{2}}(undef, nnzblocksT) @@ -407,41 +388,21 @@ function qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} Rs[jj] = R end - nb1_lt_nb2 = ( - nblocks(T)[1] < nblocks(T)[2] || - (nblocks(T)[1] == nblocks(T)[2] && dim(T, 1) < dim(T, 2)) - ) - - # setting the right index of the Q isometry, this should be - # the smaller index of the two indices of of T - qindl = ind(T, 1) - if nb1_lt_nb2 - qindr = sim(ind(T, 1)) - else - qindr = sim(ind(T, 2)) - end - - # can qindr have more blocks than T? 
- if nblocks(qindr) > nnzblocksT - resize!(qindr, nnzblocksT) - end - - for n in 1:nnzblocksT - q_dim_red = minimum(dims(Rs[n])) - setblockdim!(qindr, q_dim_red, n) - end - - # correcting the direction of the arrow - # since qind2r is basically a copy of qind1r - # if one have to be corrected the other one - # should also be corrected - if (dir(qindr) != dir(qindl)) - qindr = dag(qindr) + # + # Make the new index connecting Q and R + # + itl=ind(T, 1) #left index of T + iqr = dag(sim(itl)) #start with similar to the left index of T + resize!(iqr, nnzblocksT) #adjust the size to match the block count + for (n, blockT) in enumerate(nzblocksT) + Qdim = size(Qs[n], 2) #get the block dim on right side of Q. + b1 = block(itl, blockT[1]) + setblock!(iqr, resize(b1, Qdim), n) end - - indsQ = setindex(inds(T), dag(qindr), 2) - indsR = setindex(inds(T), qindr, 1) - + + indsQ = setindex(inds(T), iqr, 2) #inds(Q)=(inds(T)[1],iqr) + indsR = setindex(inds(T), dag(iqr), 1) #inds(R)=(iqr_dagger,inds(T)[2]) + nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) nzblocksR = Vector{Block{2}}(undef, nnzblocksT) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 2da10747be..59a8372c66 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -420,21 +420,6 @@ function qr(A::ITensor, Linds...; tags=ts"Link,qr", kwargs...) AC = permute(AC, cL, cR; allow_alias=true) # qr the matrix. QT, RT = qr(tensor(AC); kwargs...) - # - # correct the fluxes of the two tensors, such that Q has 0 flux for all blocks - # and R has the total flux of the system - # - if hasqns(AC) - for b in nzblocks(QT) - i1 = inds(QT)[1] - i2 = inds(QT)[2] - r1 = inds(RT)[1] - newqn = -dir(i2) * flux(i1 => Block(b[1])) - ITensors.setblockqn!(i2, newqn, b[2]) - ITensors.setblockqn!(r1, newqn, b[2]) - end - end - # # Undo the combine oepration, to recover all tensor indices. # @@ -446,6 +431,7 @@ function qr(A::ITensor, Linds...; tags=ts"Link,qr", kwargs...) # fix up the tag name for the index between Q and R. # q = commonind(Q, R) + Q = settags(Q, qtag, q) R = settags(R, qtag, q) q = settags(q, qtag) @@ -474,20 +460,6 @@ function rq(A::ITensor, Linds...; tags=ts"Link,rq", kwargs...) # qr the matrix. RT, QT = NDTensors.rq(tensor(AC); kwargs...) # - # correct the fluxes of the two tensors, such that Q has 0 flux for all blocks - # and R has the total flux of the system - # - if hasqns(AC) - for b in nzblocks(QT) - i1 = inds(QT)[1] - i2 = inds(QT)[2] - r2 = inds(RT)[2] - newqn = -dir(i1) * flux(i2 => Block(b[2])) - ITensors.setblockqn!(i1, newqn, b[1]) - ITensors.setblockqn!(r2, newqn, b[1]) - end - end - # # Undo the combine oepration, to recover all tensor indices. # R, Q = itensor(RT) * dag(CL), itensor(QT) * dag(CR) diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index e05c7d572b..dc93a34807 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -67,6 +67,45 @@ function is_upper(A::ITensor, r::Index)::Bool end is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) +# +# Build up Hamiltonians with non trival QN spaces in the link indices and further neighbour interactions. 
+# +function make_Heisenberg_AutoMPO(sites,NNN::Int64;J::Float64=1.0,kwargs...)::MPO + N=length(sites) + @assert N>=NNN + ampo = OpSum() + for dj=1:NNN + f=J/dj + for j=1:N-dj + add!(ampo, f ,"Sz", j, "Sz", j+dj) + add!(ampo, f*0.5,"S+", j, "S-", j+dj) + add!(ampo, f*0.5,"S-", j, "S+", j+dj) + end + end + return MPO(ampo,sites;kwargs...) +end + +function make_Hubbard_AutoMPO(sites,NNN::Int64;U::Float64=1.0,t::Float64=1.0,V::Float64=0.5,kwargs...)::MPO + N=length(sites) + @assert(N>=NNN) + os = OpSum() + for i in 1:N + os += (U, "Nupdn", i) + end + for dn=1:NNN + tj,Vj=t/dn,V/dn + for n in 1:(N - dn) + os += -tj, "Cdagup", n, "Cup", n + dn + os += -tj, "Cdagup", n + dn, "Cup", n + os += -tj, "Cdagdn", n, "Cdn", n + dn + os += -tj, "Cdagdn", n + dn, "Cdn", n + os += Vj, "Ntot" , n, "Ntot", n + dn + end + end + return MPO(os, sites;kwargs...) +end + + @testset "ITensor Decompositions" begin @testset "truncate!" begin a = [0.1, 0.01, 1e-13] @@ -288,17 +327,18 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) @test A ≈ Q * R atol = 1e-13 end - @testset "QR Heisenberg MPO tensors" begin - N = 4 - sites = siteinds("S=1", N; conserve_qns=true) - ampo = OpSum() - for j in 1:(N - 1) - ampo .+= 0.5, "S+", j, "S-", j + 1 - ampo .+= 0.5, "S-", j, "S+", j + 1 - ampo .+= "Sz", j, "Sz", j + 1 - end - H = MPO(ampo, sites; splitblocks=false) - for n in 1:(N - 1) + + test_combos=[ + (make_Heisenberg_AutoMPO,"S=1/2"), + (make_Hubbard_AutoMPO,"Electron"), + ] + + + @testset "QR MPO tensors with complex block structures, H=$(test_combo[1])" for test_combo in test_combos + N,NNN = 10,2 #10 lattice site, up 7th neight interactions + sites = siteinds(test_combo[2], N; conserve_qns=true) + H=test_combo[1](sites,NNN) + for n in 5:5 W = H[n] @test flux(W) == QN("Sz", 0) ilr = filterinds(W; tags="l=$n")[1] @@ -306,6 +346,8 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) Q, R, q = qr(W, ilq) @test flux(Q) == QN("Sz", 0) #qr should move all flux on W (0 in this case) onto R @test flux(R) == QN("Sz", 0) #this effectively removes all flux between Q and R in thie case. + @test hastags(inds(R)[1],"Link,qr") + @test hastags(inds(Q)[end],"Link,qr") @test W ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. From cf722efe96fb9126c917240952a36aec8eb29b65 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 3 Mar 2023 18:12:17 -0600 Subject: [PATCH 40/90] Format --- NDTensors/src/blocksparse/linearalgebra.jl | 8 +-- test/base/test_decomp.jl | 59 ++++++++++------------ 2 files changed, 31 insertions(+), 36 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 4943fb733e..c52e54f712 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -332,7 +332,7 @@ function rq(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} b2 = block(itl, blockT[1]) setblock!(irq, resize(b2, Rdim), n) end - + indsQ = setindex(inds(T), dag(irq), 1) #inds(Q)=(irq_dagger,inds(T)[2]) indsR = setindex(inds(T), irq, 2) #inds(R)=(inds(T)[1],irq) @@ -391,7 +391,7 @@ function qr(T::BlockSparseTensor{ElT,2}; kwargs...) 
where {ElT} # # Make the new index connecting Q and R # - itl=ind(T, 1) #left index of T + itl = ind(T, 1) #left index of T iqr = dag(sim(itl)) #start with similar to the left index of T resize!(iqr, nnzblocksT) #adjust the size to match the block count for (n, blockT) in enumerate(nzblocksT) @@ -399,10 +399,10 @@ function qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} b1 = block(itl, blockT[1]) setblock!(iqr, resize(b1, Qdim), n) end - + indsQ = setindex(inds(T), iqr, 2) #inds(Q)=(inds(T)[1],iqr) indsR = setindex(inds(T), dag(iqr), 1) #inds(R)=(iqr_dagger,inds(T)[2]) - + nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) nzblocksR = Vector{Block{2}}(undef, nnzblocksT) diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index dc93a34807..8d3d14847a 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -70,42 +70,43 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) # # Build up Hamiltonians with non trival QN spaces in the link indices and further neighbour interactions. # -function make_Heisenberg_AutoMPO(sites,NNN::Int64;J::Float64=1.0,kwargs...)::MPO - N=length(sites) - @assert N>=NNN +function make_Heisenberg_AutoMPO(sites, NNN::Int64; J::Float64=1.0, kwargs...)::MPO + N = length(sites) + @assert N >= NNN ampo = OpSum() - for dj=1:NNN - f=J/dj - for j=1:N-dj - add!(ampo, f ,"Sz", j, "Sz", j+dj) - add!(ampo, f*0.5,"S+", j, "S-", j+dj) - add!(ampo, f*0.5,"S-", j, "S+", j+dj) - end + for dj in 1:NNN + f = J / dj + for j in 1:(N - dj) + add!(ampo, f, "Sz", j, "Sz", j + dj) + add!(ampo, f * 0.5, "S+", j, "S-", j + dj) + add!(ampo, f * 0.5, "S-", j, "S+", j + dj) + end end - return MPO(ampo,sites;kwargs...) + return MPO(ampo, sites; kwargs...) end -function make_Hubbard_AutoMPO(sites,NNN::Int64;U::Float64=1.0,t::Float64=1.0,V::Float64=0.5,kwargs...)::MPO - N=length(sites) - @assert(N>=NNN) +function make_Hubbard_AutoMPO( + sites, NNN::Int64; U::Float64=1.0, t::Float64=1.0, V::Float64=0.5, kwargs... +)::MPO + N = length(sites) + @assert(N >= NNN) os = OpSum() for i in 1:N os += (U, "Nupdn", i) end - for dn=1:NNN - tj,Vj=t/dn,V/dn - for n in 1:(N - dn) + for dn in 1:NNN + tj, Vj = t / dn, V / dn + for n in 1:(N - dn) os += -tj, "Cdagup", n, "Cup", n + dn os += -tj, "Cdagup", n + dn, "Cup", n os += -tj, "Cdagdn", n, "Cdn", n + dn os += -tj, "Cdagdn", n + dn, "Cdn", n - os += Vj, "Ntot" , n, "Ntot", n + dn - end + os += Vj, "Ntot", n, "Ntot", n + dn + end end - return MPO(os, sites;kwargs...) + return MPO(os, sites; kwargs...) end - @testset "ITensor Decompositions" begin @testset "truncate!" 
begin a = [0.1, 0.01, 1e-13] @@ -327,17 +328,13 @@ end @test A ≈ Q * R atol = 1e-13 end - - test_combos=[ - (make_Heisenberg_AutoMPO,"S=1/2"), - (make_Hubbard_AutoMPO,"Electron"), - ] - + test_combos = [(make_Heisenberg_AutoMPO, "S=1/2"), (make_Hubbard_AutoMPO, "Electron")] - @testset "QR MPO tensors with complex block structures, H=$(test_combo[1])" for test_combo in test_combos - N,NNN = 10,2 #10 lattice site, up 7th neight interactions + @testset "QR MPO tensors with complex block structures, H=$(test_combo[1])" for test_combo in + test_combos + N, NNN = 10, 2 #10 lattice site, up 7th neight interactions sites = siteinds(test_combo[2], N; conserve_qns=true) - H=test_combo[1](sites,NNN) + H = test_combo[1](sites, NNN) for n in 5:5 W = H[n] @test flux(W) == QN("Sz", 0) @@ -346,8 +343,6 @@ end Q, R, q = qr(W, ilq) @test flux(Q) == QN("Sz", 0) #qr should move all flux on W (0 in this case) onto R @test flux(R) == QN("Sz", 0) #this effectively removes all flux between Q and R in thie case. - @test hastags(inds(R)[1],"Link,qr") - @test hastags(inds(Q)[end],"Link,qr") @test W ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. From 7e39a55f01792a5f21d92586b1b605c990e773c7 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 4 Mar 2023 14:40:40 -0600 Subject: [PATCH 41/90] Fix double swap of Q & L in the ql() function --- src/tensor_operations/matrix_decomposition.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 59a8372c66..572fcd26a0 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -493,7 +493,7 @@ function lq(A::ITensor, Linds...; tags=ts"Link,lq", kwargs...) end function ql(A::ITensor, Linds...; tags=ts"Link,ql", kwargs...) - Q, L, q = rq(A, uniqueinds(A, Linds...); kwargs...) + L, Q, q = rq(A, uniqueinds(A, Linds...); kwargs...) # # fix up the tag name for the index between Q and R. # @@ -502,7 +502,7 @@ function ql(A::ITensor, Linds...; tags=ts"Link,ql", kwargs...) L = settags(L, qtag, q) q = settags(q, qtag) - return L, Q, q + return Q, L, q end polar(A::ITensor; kwargs...) = error(noinds_error_message("polar")) From eb955be09e2333d7f5c3a042ed4950e09c746228 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Mon, 6 Mar 2023 15:50:08 -0600 Subject: [PATCH 42/90] Use generic function for most of the qr/ql operations. --- NDTensors/src/blocksparse/linearalgebra.jl | 119 +++--------- NDTensors/src/exports.jl | 2 +- NDTensors/src/linearalgebra.jl | 171 +++++++++--------- src/tensor_operations/matrix_decomposition.jl | 102 +++-------- test/base/test_decomp.jl | 64 ++++--- 5 files changed, 180 insertions(+), 278 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index c52e54f712..e0c08c073b 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -296,141 +296,72 @@ function LinearAlgebra.eigen( return D, V, Spectrum(d, truncerr) end -# QR a block sparse Rank 2 tensor. -# This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb +ql(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(ql,T;kwargs...) +qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(qr,T;kwargs...) 
#
-function rq(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT}
-
-  # getting total number of blocks
-  nnzblocksT = nnzblocks(T)
-  nzblocksT = nzblocks(T)
-
-  Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT)
-  Rs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT)
-
-  for (jj, b) in enumerate(eachnzblock(T))
-    blockT = blockview(T, b)
-    RQb = rq(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387
-
-    if (isnothing(RQb))
-      return nothing
-    end
-
-    R, Q = RQb
-    Qs[jj] = Q
-    Rs[jj] = R
-  end
-
-  #
-  # Make the new index connecting R and Q
-  #
-  itl = ind(T, 1) #left index of T
-  irq = dag(sim(itl)) #start with similar to the left index of T
-  resize!(irq, nnzblocksT) #adjust the size to match the block count
-  for (n, blockT) in enumerate(nzblocksT)
-    Rdim = size(Rs[n], 2)
-    b2 = block(itl, blockT[1])
-    setblock!(irq, resize(b2, Rdim), n)
-  end
-
-  indsQ = setindex(inds(T), dag(irq), 1) #inds(Q)=(irq_dagger,inds(T)[2])
-  indsR = setindex(inds(T), irq, 2) #inds(R)=(inds(T)[1],irq)
-
-  nzblocksQ = Vector{Block{2}}(undef, nnzblocksT)
-  nzblocksR = Vector{Block{2}}(undef, nnzblocksT)
-
-  for n in 1:nnzblocksT
-    blockT = nzblocksT[n]
-
-    blockR = (blockT[1], UInt(n))
-    nzblocksR[n] = blockR
-
-    blockQ = (UInt(n), blockT[2])
-    nzblocksQ[n] = blockQ
-  end
-
-  Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ)
-  R = BlockSparseTensor(ElT, undef, nzblocksR, indsR)
-
-  for n in 1:nnzblocksT
-    Qb, Rb = Qs[n], Rs[n]
-    blockQ = nzblocksQ[n]
-    blockR = nzblocksR[n]
-
-    blockview(Q, blockQ) .= Qb
-    blockview(R, blockR) .= Rb
-  end
-
-  return R, Q
-end
-# QR a block sparse Rank 2 tensor.
-# This code thanks to Niklas Tausendpfund https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb
+#
+# Generic function to implement block sparse qr/ql decomposition. It calls
+# the dense qr or ql for each block. The X tensor = R or L.
+# This code thanks to Niklas Tausendpfund
+# https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb
 #
-function qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT}
+function qx(qx::Function, T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT}

   # getting total number of blocks
   nnzblocksT = nnzblocks(T)
   nzblocksT = nzblocks(T)

   Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT)
-  Rs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT)
+  Xs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT)

   for (jj, b) in enumerate(eachnzblock(T))
     blockT = blockview(T, b)
-    QRb = qr(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387
+    QXb = qx(blockT; kwargs...) #call the dense qr or ql implemented in src/linearalgebra.jl

-    if (isnothing(QRb))
+    if (isnothing(QXb))
       return nothing
     end

-    Q, R = QRb
+    Q, X = QXb
     Qs[jj] = Q
-    Rs[jj] = R
+    Xs[jj] = X
   end

   #
   # Make the new index connecting Q and R
   #
   itl = ind(T, 1) #left index of T
-  iqr = dag(sim(itl)) #start with similar to the left index of T
-  resize!(iqr, nnzblocksT) #adjust the size to match the block count
+  iq = dag(sim(itl)) #start with similar to the left index of T
+  resize!(iq, nnzblocksT) #adjust the size to match the block count
   for (n, blockT) in enumerate(nzblocksT)
     Qdim = size(Qs[n], 2) #get the block dim on the right side of Q.
b1 = block(itl, blockT[1]) - setblock!(iqr, resize(b1, Qdim), n) + setblock!(iq, resize(b1, Qdim), n) end - indsQ = setindex(inds(T), iqr, 2) #inds(Q)=(inds(T)[1],iqr) - indsR = setindex(inds(T), dag(iqr), 1) #inds(R)=(iqr_dagger,inds(T)[2]) + indsQ = setindex(inds(T), iq, 2) + indsX = setindex(inds(T), dag(iq), 1) nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) - nzblocksR = Vector{Block{2}}(undef, nnzblocksT) + nzblocksX = Vector{Block{2}}(undef, nnzblocksT) for n in 1:nnzblocksT blockT = nzblocksT[n] - - blockQ = (blockT[1], UInt(n)) - nzblocksQ[n] = blockQ - - blockR = (UInt(n), blockT[2]) - nzblocksR[n] = blockR + nzblocksQ[n] = (blockT[1], UInt(n)) + nzblocksX[n] = (UInt(n), blockT[2]) end Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) - R = BlockSparseTensor(ElT, undef, nzblocksR, indsR) + X = BlockSparseTensor(ElT, undef, nzblocksX, indsX) for n in 1:nnzblocksT - Qb, Rb = Qs[n], Rs[n] - blockQ = nzblocksQ[n] - blockR = nzblocksR[n] - - blockview(Q, blockQ) .= Qb - blockview(R, blockR) .= Rb + blockview(Q, nzblocksQ[n]) .= Qs[n] + blockview(X, nzblocksX[n]) .= Xs[n] end - return Q, R + return Q, X end + function exp( T::Union{BlockSparseMatrix{ElT},Hermitian{ElT,<:BlockSparseMatrix{ElT}}} ) where {ElT<:Union{Real,Complex}} diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index 190d78de2b..48541dc98c 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -79,4 +79,4 @@ export store, # linearalgebra.jl - rq + ql diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 100d3f2c95..cd77a28e00 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -303,26 +303,6 @@ random_orthog(::Type{ElT}, n::Int, m::Int) where {ElT<:Real} = random_unitary(El random_orthog(n::Int, m::Int) = random_orthog(Float64, n, m) -""" - qr_positive(M::AbstractMatrix) - -Compute the QR decomposition of a matrix M -such that the diagonal elements of R are -non-negative. Such a QR decomposition of a -matrix is unique. Returns a tuple (Q,R). -""" -function qr_positive(M::AbstractMatrix) - sparseQ, R = qr(M) - Q = convert(Matrix, sparseQ) - nc = size(Q, 2) - for c in 1:nc - if real(R[c, c]) < 0.0 - R[c, c:end] *= -1 - Q[:, c] *= -1 - end - end - return (Q, R) -end function LinearAlgebra.eigen( T::DenseTensor{ElT,2,IndsT}; kwargs... @@ -388,111 +368,124 @@ function LinearAlgebra.eigen( return D, V, spec end -function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} - # TODO: just call qr on T directly (make sure - # that is fast) - if positive - QM, RM = qr_positive(matrix(T)) - else - QM, RM = qr(matrix(T)) - end - # Make the new indices to go onto Q and R - q, r = inds(T) - q = dim(q) < dim(r) ? sim(q) : sim(r) - Qinds = IndsT((ind(T, 1), q)) - Rinds = IndsT((q, ind(T, 2))) - Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided - R = tensor(Dense(vec(RM)), Rinds) - return Q, R + +function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} + qxf= positive ? qr_positive : qr + return qx(qxf,T;kwargs...) +end +function ql(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} + qxf= positive ? ql_positive : ql + return qx(qxf,T;kwargs...) end # -# Uses kwargs:positive to decide which rq method to call. +# Generic function for qr and ql decomposition of dense matrix. +# The X tensor = R or L. # -function rq(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) 
where {ElT,IndsT}
-  if positive
-    RM, QM = rq_positive(matrix(T))
-  else
-    RM, QM = rq(matrix(T))
-  end
-
+function qx(qx::Function, T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT}
+  QM, XM = qx(matrix(T))
   # Make the new indices to go onto Q and R
-  r, q = inds(T)
+  q, r = inds(T)
   q = dim(q) < dim(r) ? sim(q) : sim(r)
-  Qinds = IndsT((q, ind(T, 2)))
-  Linds = IndsT((ind(T, 1), q))
+  Qinds = IndsT((ind(T, 1), q))
+  Xinds = IndsT((q, ind(T, 2)))
   Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided
-  R = tensor(Dense(vec(RM)), Linds)
-  return R, Q
+  X = tensor(Dense(vec(XM)), Xinds)
+  return Q, X
 end

 #
 # Just flip signs between Q and R to get all the diagonals of R >=0.
 # For rectangular M the indexing for "diagonal" is non-trivial.
 #
-function rq_positive(M::AbstractMatrix)
-  R, sparseQ = rq(M)
+"""
+    qr_positive(M::AbstractMatrix)
+
+Compute the QR decomposition of a matrix M
+such that the diagonal elements of R are
+non-negative. Such a QR decomposition of a
+matrix is unique. Returns a tuple (Q,R).
+"""
+function qr_positive(M::AbstractMatrix)
+  sparseQ, R = qr(M)
   Q = convert(Matrix, sparseQ)
-  nr, nc = size(R)
-  dr = nr > nc ? nr - nc : 0 #diag is shifted down by dr if nr>nc
-  for r in 1:nr
-    if r <= nc && real(R[r + dr, r]) < 0.0
-      R[1:(r + dr), r] *= -1
-      Q[r, :] *= -1
+  nc = size(Q, 2)
+  for c in 1:nc
+    if real(R[c, c]) < 0.0
+      R[c, c:end] *= -1 #only flip the non-zero portion of the row.
+      Q[:, c] *= -1
     end
   end
-  return (R, Q)
+  return (Q, R)
+end
+
+"""
+    ql_positive(M::AbstractMatrix)
+
+Compute the QL decomposition of a matrix M
+such that the diagonal elements of L are
+non-negative. Such a QL decomposition of a
+matrix is unique. Returns a tuple (Q,L).
+"""
+function ql_positive(M::AbstractMatrix)
+  sparseQ, L = ql(M)
+  Q = convert(Matrix, sparseQ)
+  nr,nc = size(Q)
+  dc=nc>nr ? nc-nr : 0 #diag is shifted over by dc if nc>nr
+  for c in 1:nc
+    if c<=nr && real(L[c, c+dc]) < 0.0
+      L[c, 1:c+dc] *= -1 #only flip the non-zero portion of the column.
+      Q[:,c] *= -1
+    end
+  end
+  return (Q, L)
 end

 #
 # Lapack replaces A with Q & R carefully packed together. So here we just copy A
 # before letting lapack overwrite it.
 #
-function rq(A::AbstractMatrix{T}; kwargs...) where {T}
+function ql(A::AbstractMatrix{T}; kwargs...) where T
   Base.require_one_based_indexing(A)
   AA = similar(A, LinearAlgebra._qreltype(T), size(A))
   copyto!(AA, A)
-  return rq!(AA; kwargs...)
+  return ql!(AA; kwargs...)
 end
-
-rq!(A::AbstractMatrix) = rq!(A)
-
 #
 # This is where the low level call to lapack actually occurs. Most of the work is
-# about unpacking Q and R from the A matrix.
+# about unpacking Q and L from the A matrix.
 #
-function rq!(A::StridedMatrix{<:LAPACK.BlasFloat})
-  tau = Base.similar(A, Base.min(size(A)...))
-  x = LAPACK.gerqf!(A, tau)
-
-  # Unpack R from the lower portion of A, before orgql! mangles it!
-  nr, nc = size(A)
-  mn = Base.min(nr, nc)
-  R = similar(A, (nr, mn))
-  for c in 1:mn
-    for r in 1:(c + nr - mn)
-      R[r, c] = A[r, c + nc - mn]
+function ql!(A::StridedMatrix{<:LAPACK.BlasFloat})
+  tau=Base.similar(A, min(size(A)...))
+  x=LAPACK.geqlf!(A, tau)
+  #save L from the lower portion of A, before orgql! mangles it!
+  nr,nc=size(A)
+  mn=min(nr,nc)
+  L=similar(A,(mn,nc))
+  for r in 1:mn
+    for c in 1:r+nc-mn
+      L[r,c]=A[r+nr-mn,c]
     end
-    for r in (c + 1 + nr - mn):nr
-      R[r, c] = 0.0
+    for c in r+1+nc-mn:nc
+      L[r,c]=0.0
     end
   end
-  #
-  # If nr>nc we need shift the orth vectors from the bottom of Q up to top before
-  # unpacking the reflectors.
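# [Editor's note] The qr_positive/ql_positive trick above in a self-contained
# form (plain LinearAlgebra, independent of the NDTensors wrappers). A QR
# decomposition is unique only up to unit-modulus phases between Q and R;
# flipping the sign of column c of Q together with row c of R whenever
# R[c,c] < 0 picks the representative with a non-negative diagonal while
# leaving the product Q*R unchanged:
using LinearAlgebra
A = randn(6, 4)
F = qr(A)
Q, R = Matrix(F.Q), F.R
for c in 1:size(R, 1)
  if real(R[c, c]) < 0.0
    R[c, :] *= -1                  # flip row c of R ...
    Q[:, c] *= -1                  # ... and column c of Q: Q*R is unchanged
  end
end
@assert Q * R ≈ A && all(real.(diag(R)) .>= 0)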
- # - if mn < nr - for c in 1:nc - for r in 1:mn - A[r, c] = A[r + nr - mn, c] + # Now we need shift the orth vectors from the right side of Q over the left side, before + if (mn 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="r") @@ -249,29 +249,43 @@ end @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(R) == expected_Rflux[ninds + 1] + @test flux(R) == expected_RLflux[ninds + 1] @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Rflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_Qflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + + Q, L, q = ITensors.ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(L)) == 3-ninds + 1 #+1 to account for new rq,Link index. + @test length(inds(Q)) == ninds + 1 + @test flux(Q) == expected_Qflux[ninds + 1] + @test flux(L) == expected_RLflux[ninds + 1] + @test A ≈ Q * L atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(R) == expected_Rflux[ninds + 1] + @test flux(R) == expected_RLflux[ninds + 1] @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + L, Q, q = ITensors.lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(L)) == ninds + 1 #+1 to account for new rq,Link index. + @test length(inds(Q)) == 3 - ninds + 1 + @test flux(Q) == expected_Qflux[ninds + 1] + @test flux(L) == expected_RLflux[ninds + 1] + @test A ≈ Q * L atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - @testset "QR/RQ block sparse on MPO tensor with all possible collections on Q,R" for ninds in - [ + @testset "QR/QL block sparse on MPO tensor with all possible collections on Q,R" for ninds in [ 0, 1, 2, 3, 4 ] expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] + expected_RLflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] l = dag(Index(QN("Sz", 0) => 3; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") @@ -282,25 +296,23 @@ end @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 4 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(R) == expected_Rflux[ninds + 1] + @test flux(R) == expected_RLflux[ninds + 1] @test A ≈ Q * R atol = 1e-13 # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
# @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. - @test length(inds(Q)) == 4 - ninds + 1 + Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. + @test length(inds(L)) == 4 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(R) == expected_Rflux[ninds + 1] - @test A ≈ Q * R atol = 1e-13 + @test flux(L) == expected_RLflux[ninds + 1] + @test A ≈ Q * L atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - @testset "QR/RQ dense with positive R" begin + @testset "QR/QL/RQ/LQ dense with positive R" begin l = Index(5, "l") s = Index(2, "s") r = Index(10, "r") @@ -309,13 +321,23 @@ end @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + Q, L, q = ITensors.ql(A, l, s, s'; positive=true) + @test min(diag(L)...) > 0.0 + @test A ≈ Q * L atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + R, Q, q = ITensors.rq(A, r; positive=true) @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + L, Q, q = ITensors.lq(A, r; positive=true) + @test min(diag(L)...) > 0.0 + @test A ≈ Q * L atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + end - @testset "QR/RQ block sparse with positive R" begin + @testset "QR/QL block sparse with positive R" begin l = dag(Index(QN("Sz", 0) => 3; tags="l")) s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") @@ -323,9 +345,9 @@ end Q, R, q = qr(A, l, s, s'; positive=true) @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 - R, Q, q = ITensors.rq(A, r; positive=true) - @test min(diag(R)...) > 0.0 - @test A ≈ Q * R atol = 1e-13 + Q, L, q = ITensors.ql(A, l, s, s'; positive=true) + @test min(diag(L)...) 
> 0.0
+    @test A ≈ Q * L atol = 1e-13
   end

From df6c07f1ca76206b4767e5b103130587c566e220 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Tue, 7 Mar 2023 08:27:00 -0600
Subject: [PATCH 43/90] Implement core qx functions accepting both Linds and
 Rinds; wrappers provided for the Linds-only interface

---
 NDTensors/src/exports.jl                      |  3 +-
 src/imports.jl                                |  1 +
 src/tensor_operations/matrix_decomposition.jl | 77 ++++++++++++-------
 test/base/test_decomp.jl                      | 46 ++++++++---
 4 files changed, 87 insertions(+), 40 deletions(-)

diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl
index 48541dc98c..5fc664f987 100644
--- a/NDTensors/src/exports.jl
+++ b/NDTensors/src/exports.jl
@@ -50,6 +50,7 @@ export
   matrix,
   outer,
   permutedims!!,
+  ql,
   read,
   vector,
   write,
@@ -79,4 +80,4 @@ export
   store,

   # linearalgebra.jl
-  ql
+  qr

diff --git a/src/imports.jl b/src/imports.jl
index 1a8d313c78..09fc2465f5 100644
--- a/src/imports.jl
+++ b/src/imports.jl
@@ -164,6 +164,7 @@ import ITensors.NDTensors:
   outer,
   permuteblocks,
   polar,
+  ql,
   scalartype,
   scale!,
   setblock!,

diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index dee0fb62c1..72797079ce 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -363,12 +363,11 @@ function eigen(A::ITensor; kwargs...)
   return eigen(A, Lis, Ris; kwargs...)
 end

-function noinds_error_message(decomp::String)
-  return "$decomp without any input indices is currently not defined.
-          In the future it may be defined as performing a $decomp decomposition
-          treating the ITensor as a matrix from the primed to the unprimed indices."
-end
+# ----------------------------- QR/RQ/QL/LQ decompositions ------------------------------
+#
+# Helper functions for handling cases where zero indices are requested on Q or R.
+#
 function add_trivial_index(A::ITensor, Ainds)
   α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1)
   vα = onehot(eltype(A), α => 1)
@@ -392,40 +391,62 @@ remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, vαr) = (Q, R * dag(vαr))
 remove_trivial_index(Q::ITensor, R::ITensor, vαl, ::Nothing) = (Q * dag(vαl), R)
 remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, ::Nothing) = (Q, R)

-#Force users to knowingly ask for zero indices using qr(A,()) syntax
+#
+# Force users to knowingly ask for zero indices using qr(A,()) syntax
+#
+function noinds_error_message(decomp::String)
+  return "$decomp without any input indices is currently not defined.
+          In the future it may be defined as performing a $decomp decomposition
+          treating the ITensor as a matrix from the primed to the unprimed indices."
+end
+
 qr(A::ITensor; kwargs...) = error(noinds_error_message("qr"))
 rq(A::ITensor; kwargs...) = error(noinds_error_message("rq"))
 lq(A::ITensor; kwargs...) = error(noinds_error_message("lq"))
 ql(A::ITensor; kwargs...) = error(noinds_error_message("ql"))
-
-
-qr(A::ITensor, Linds...; qtags=ts"Link,qr", kwargs...)=qx(qr,qtags,A,Linds...;kwargs...)
-ql(A::ITensor, Linds...; qtags=ts"Link,ql", kwargs...)=qx(NDTensors.ql,qtags,A,Linds...;kwargs...)
-rq(A::ITensor, Linds...; qtags=ts"Link,rq", kwargs...)=xq(ql,qtags,A,Linds...;kwargs...)
-lq(A::ITensor, Linds...; qtags=ts"Link,lq", kwargs...)=xq(qr,qtags,A,Linds...;kwargs...)
-
+#
+# User supplied only left indices, as a tuple or vector.
+#
+qr(A::ITensor, Linds::Indices; kwargs...)=qr(A,Linds,uniqueinds(A, Linds);kwargs...)
+ql(A::ITensor, Linds::Indices; kwargs...)=ql(A,Linds,uniqueinds(A, Linds);kwargs...)
+rq(A::ITensor, Linds::Indices; kwargs...)=rq(A,Linds,uniqueinds(A, Linds);kwargs...)
+lq(A::ITensor, Linds::Indices; kwargs...)=lq(A,Linds,uniqueinds(A, Linds);kwargs...)
+#
+# User supplied only left indices, as a vararg.
+#
+qr(A::ITensor, Linds...; kwargs...)=qr(A,Linds,uniqueinds(A, Linds);kwargs...)
+ql(A::ITensor, Linds...; kwargs...)=ql(A,Linds,uniqueinds(A, Linds);kwargs...)
+rq(A::ITensor, Linds...; kwargs...)=rq(A,Linds,uniqueinds(A, Linds);kwargs...)
+lq(A::ITensor, Linds...; kwargs...)=lq(A,Linds,uniqueinds(A, Linds);kwargs...)
+#
+# Core functions where both left and right indices are supplied, as tuples or vectors.
+# Handle default tags and dispatch to the generic qx/xq functions.
+#
+qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", kwargs...)=qx(qr,tags,A,Linds,Rinds;kwargs...)
+ql(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,ql", kwargs...)=qx(ql,tags,A,Linds,Rinds;kwargs...)
+rq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,rq", kwargs...)=xq(ql,tags,A,Linds,Rinds;kwargs...)
+lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", kwargs...)=xq(qr,tags,A,Linds,Rinds;kwargs...)
 #
 # Generic function implementing both qr and ql decomposition. The X tensor = R or L.
 #
-function qx(qx::Function, qtags, A::ITensor, Linds...; kwargs...)
-  Lis = commoninds(A, indices(Linds...))
-  Ris = uniqueinds(A, Lis)
-  # Make a dummy index with dim=1 and incorporate into A so the Lis & Ris can never
+function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...)
+  #
+  # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never
   # be empty. A essentially becomes 1D after collection.
-  A, vαl, vαr, Lis, Ris = add_trivial_index(A, Lis, Ris)
-  #
-  # Use combiners to render A down to a rank 2 tensor ready matrix QR routine.
+  A, vαl, vαr, Linds, Rinds = add_trivial_index(A, Linds, Rinds)
+  #
+  # Use combiners to render A down to a rank 2 tensor, ready for the matrix QR/QL routine.
   #
-  CL, CR = combiner(Lis...), combiner(Ris...)
+  CL, CR = combiner(Linds...), combiner(Rinds...)
   cL, cR = combinedind(CL), combinedind(CR)
   AC = A * CR * CL
   #
-  # Make sure we don't accidentally pass the transpose into the matrix qr routine.
+  # Make sure we don't accidentally pass the transpose into the matrix qr/ql routine.
   #
   AC = permute(AC, cL, cR; allow_alias=true)
-  # qr the matrix.
-  QT, XT = qx(tensor(AC); kwargs...)
+
+  QT, XT = qx(tensor(AC); kwargs...) #pass the order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
   #
   # Undo the combine operation, to recover all tensor indices.
   #
   # Remove dummy indices. No-op if vαl and vαr are Nothing
   Q, X = remove_trivial_index(Q, X, vαl, vαr)
   #
-  # fix up the tag name for the index between Q and R.
+  # fix up the tag name for the index between Q and X.
   #
   q = commonind(Q, X)
   Q = settags(Q, qtags, q)
   X = settags(X, qtags, q)
   q = settags(q, qtags)

   return Q, X, q
 end

 #
-# Generic function implementing both rq and lq decomposition. Implemented using qr/ql combinedind
+# Generic function implementing both rq and lq decomposition. Implemented using qr/ql,
 # with swapping the left and right indices. The X tensor = R or L.
 #
-function xq(qx::Function, qtags::TagSet,A::ITensor, Linds...;kwargs...)
- Q, X, q = qx(A, uniqueinds(A, Linds...); kwargs...) +function xq(qx::Function, qtags::TagSet,A::ITensor, Linds::Indices, Rinds::Indices;kwargs...) + Q, X, q = qx(A, Rinds, Linds; kwargs...) # # fix up the tag name for the index between Q and L. # diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index efa07c7dbe..22180666d4 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -167,18 +167,26 @@ end r = Index(5, "r") A = randomITensor(elt, l, s, r) Ainds = inds(A) - Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + Linds=Ainds[1:ninds] + Rinds=uniqueinds(A, Linds...) + Q, R, q = qr(A, Linds;tags="Link,qr1") #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test q == commonind(Q, R) - @test hastags(q, "qr") + @test hastags(q, "qr1") if (length(inds(R)) > 1) @test is_upper(q, R) #specify the left index end - - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) + Q1, R1, q1 = qr(A, Linds, Rinds;tags="Link,myqr") #make sure the same call with both L & R indices give the same answer. + Q1=replaceind(Q1,q1,q) + R1=replaceind(R1,q1,q) + @test norm(Q-Q1)==0.0 + @test norm(R-R1)==0.0 + @test hastags(q1, "Link,myqr") + + R, Q, q = rq(A, Linds) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -188,8 +196,15 @@ end if (length(inds(R)) > 1) @test is_upper(R, q) #specify the right index end - - L, Q, q = lq(A, Ainds[1:ninds]) + R1, Q1, q1 = rq(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. + Q1=replaceind(Q1,q1,q) + R1=replaceind(R1,q1,q) + @test norm(Q-Q1)==0.0 + @test norm(R-R1)==0.0 + # @test hastags(q, "myrq") + # @test hastags(q, "Link") + + L, Q, q = lq(A, Linds) @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @@ -199,8 +214,13 @@ end if (length(inds(L)) > 1) @test is_lower(L, q) #specify the right index end + L1, Q1, q1 = lq(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. + Q1=replaceind(Q1,q1,q) + L1=replaceind(L1,q1,q) + @test norm(Q-Q1)==0.0 + @test norm(L-L1)==0.0 - Q, L, q = ITensors.ql(A, Ainds[1:ninds]) + Q, L, q = ql(A, Linds) @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. @test length(inds(L)) == 3 - ninds + 1 @test A ≈ Q * L atol = 1e-13 @@ -210,10 +230,14 @@ end if (length(inds(L)) > 1) @test is_lower(q, L) #specify the right index end + Q1, L1, q1 = ql(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. + Q1=replaceind(Q1,q1,q) + L1=replaceind(L1,q1,q) + @test norm(Q-Q1)==0.0 + @test norm(L-L1)==0.0 end - @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [ - 0, 1, 2, 3, 4 + @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [0, 1, 2, 3, 4 ] l = Index(5, "l") s = Index(2, "s") @@ -342,10 +366,10 @@ end s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") A = randomITensor(l, s, dag(s'), r) - Q, R, q = qr(A, l, s, s'; positive=true) + Q, R, q = qr(A, l, s, dag(s'); positive=true) @test min(diag(R)...) 
> 0.0 @test A ≈ Q * R atol = 1e-13 - Q, L, q = ITensors.ql(A, l, s, s'; positive=true) + Q, L, q = ITensors.ql(A, l, s, dag(s'); positive=true) @test min(diag(L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 end From 9223523583120a4a09eaef9dbca9316b8cd737f0 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 08:28:24 -0600 Subject: [PATCH 44/90] Format --- NDTensors/src/blocksparse/linearalgebra.jl | 11 ++- NDTensors/src/linearalgebra.jl | 67 +++++++++---------- src/tensor_operations/matrix_decomposition.jl | 39 ++++++----- test/base/test_decomp.jl | 53 ++++++++------- 4 files changed, 88 insertions(+), 82 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index e0c08c073b..22434db26f 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -296,15 +296,15 @@ function LinearAlgebra.eigen( return D, V, Spectrum(d, truncerr) end -ql(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(ql,T;kwargs...) -qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(qr,T;kwargs...) +ql(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(ql, T; kwargs...) +qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(qr, T; kwargs...) # # Generic function to implelement blocks sparse qr/ql decomposition. It calls # the dense qr or ql for each block. The X tensor = R or L. # This code thanks to Niklas Tausendpfund # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qx(qx::Function,T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} +function qx(qx::Function, T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} # getting total number of blocks nnzblocksT = nnzblocks(T) @@ -338,8 +338,8 @@ function qx(qx::Function,T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} setblock!(iq, resize(b1, Qdim), n) end - indsQ = setindex(inds(T), iq, 2) - indsX = setindex(inds(T), dag(iq), 1) + indsQ = setindex(inds(T), iq, 2) + indsX = setindex(inds(T), dag(iq), 1) nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) nzblocksX = Vector{Block{2}}(undef, nnzblocksT) @@ -361,7 +361,6 @@ function qx(qx::Function,T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} return Q, X end - function exp( T::Union{BlockSparseMatrix{ElT},Hermitian{ElT,<:BlockSparseMatrix{ElT}}} ) where {ElT<:Union{Real,Complex}} diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index cd77a28e00..f144eb3760 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -303,7 +303,6 @@ random_orthog(::Type{ElT}, n::Int, m::Int) where {ElT<:Real} = random_unitary(El random_orthog(n::Int, m::Int) = random_orthog(Float64, n, m) - function LinearAlgebra.eigen( T::DenseTensor{ElT,2,IndsT}; kwargs... ) where {ElT<:Union{Real,Complex},IndsT} @@ -368,21 +367,20 @@ function LinearAlgebra.eigen( return D, V, spec end - -function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} - qxf= positive ? qr_positive : qr - return qx(qxf,T;kwargs...) +function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} + qxf = positive ? qr_positive : qr + return qx(qxf, T; kwargs...) end -function ql(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} - qxf= positive ? ql_positive : ql - return qx(qxf,T;kwargs...) +function ql(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} + qxf = positive ? ql_positive : ql + return qx(qxf, T; kwargs...) 
end # # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. # -function qx(qx::Function,T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} +function qx(qx::Function, T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} QM, XM = qx(matrix(T)) # Make the new indices to go onto Q and R q, r = inds(T) @@ -430,12 +428,12 @@ matrix is unique. Returns a tuple (Q,L). function ql_positive(M::AbstractMatrix) sparseQ, L = ql(M) Q = convert(Matrix, sparseQ) - nr,nc = size(Q) - dc=nc>nr ? nc-nr : 0 #diag is shifted over by dc if nc>nr + nr, nc = size(Q) + dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr for c in 1:nc - if c<=nr && real(L[c, c+dc]) < 0.0 - L[c, 1:c+dc] *= -1 #only fip non-zero portion of the column. - Q[:,c] *= -1 + if c <= nr && real(L[c, c + dc]) < 0.0 + L[c, 1:(c + dc)] *= -1 #only fip non-zero portion of the column. + Q[:, c] *= -1 end end return (Q, L) @@ -445,7 +443,7 @@ end # Lapack replaces A with Q & R carefully packed together. So here we just copy a # before letting lapack overwirte it. # -function ql(A::AbstractMatrix{T}; kwargs...) where T +function ql(A::AbstractMatrix{T}; kwargs...) where {T} Base.require_one_based_indexing(A) AA = similar(A, LinearAlgebra._qreltype(T), size(A)) copyto!(AA, A) @@ -456,36 +454,35 @@ end # about unpacking Q and L from the A matrix. # function ql!(A::StridedMatrix{<:LAPACK.BlasFloat}) - tau=Base.similar(A, min(size(A)...)) - x=LAPACK.geqlf!(A, tau) + tau = Base.similar(A, min(size(A)...)) + x = LAPACK.geqlf!(A, tau) #save L from the lower portion of A, before orgql! mangles it! - nr,nc=size(A) - mn=min(nr,nc) - L=similar(A,(mn,nc)) + nr, nc = size(A) + mn = min(nr, nc) + L = similar(A, (mn, nc)) for r in 1:mn - for c in 1:r+nc-mn - L[r,c]=A[r+nr-mn,c] + for c in 1:(r + nc - mn) + L[r, c] = A[r + nr - mn, c] end - for c in r+1+nc-mn:nc - L[r,c]=0.0 + for c in (r + 1 + nc - mn):nc + L[r, c] = 0.0 end end # Now we need shift the orth vectors from the right side of Q over the left side, before - if (mn 1) @test is_upper(q, R) #specify the left index end - Q1, R1, q1 = qr(A, Linds, Rinds;tags="Link,myqr") #make sure the same call with both L & R indices give the same answer. - Q1=replaceind(Q1,q1,q) - R1=replaceind(R1,q1,q) - @test norm(Q-Q1)==0.0 - @test norm(R-R1)==0.0 + Q1, R1, q1 = qr(A, Linds, Rinds; tags="Link,myqr") #make sure the same call with both L & R indices give the same answer. + Q1 = replaceind(Q1, q1, q) + R1 = replaceind(R1, q1, q) + @test norm(Q - Q1) == 0.0 + @test norm(R - R1) == 0.0 @test hastags(q1, "Link,myqr") - + R, Q, q = rq(A, Linds) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @@ -197,10 +197,10 @@ end @test is_upper(R, q) #specify the right index end R1, Q1, q1 = rq(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. - Q1=replaceind(Q1,q1,q) - R1=replaceind(R1,q1,q) - @test norm(Q-Q1)==0.0 - @test norm(R-R1)==0.0 + Q1 = replaceind(Q1, q1, q) + R1 = replaceind(R1, q1, q) + @test norm(Q - Q1) == 0.0 + @test norm(R - R1) == 0.0 # @test hastags(q, "myrq") # @test hastags(q, "Link") @@ -215,10 +215,10 @@ end @test is_lower(L, q) #specify the right index end L1, Q1, q1 = lq(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. 
- Q1=replaceind(Q1,q1,q) - L1=replaceind(L1,q1,q) - @test norm(Q-Q1)==0.0 - @test norm(L-L1)==0.0 + Q1 = replaceind(Q1, q1, q) + L1 = replaceind(L1, q1, q) + @test norm(Q - Q1) == 0.0 + @test norm(L - L1) == 0.0 Q, L, q = ql(A, Linds) @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. @@ -231,13 +231,14 @@ end @test is_lower(q, L) #specify the right index end Q1, L1, q1 = ql(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. - Q1=replaceind(Q1,q1,q) - L1=replaceind(L1,q1,q) - @test norm(Q-Q1)==0.0 - @test norm(L-L1)==0.0 + Q1 = replaceind(Q1, q1, q) + L1 = replaceind(L1, q1, q) + @test norm(Q - Q1) == 0.0 + @test norm(L - L1) == 0.0 end - @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [0, 1, 2, 3, 4 + @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [ + 0, 1, 2, 3, 4 ] l = Index(5, "l") s = Index(2, "s") @@ -279,9 +280,9 @@ end # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - + Q, L, q = ITensors.ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(L)) == 3-ninds + 1 #+1 to account for new rq,Link index. + @test length(inds(L)) == 3 - ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @test flux(L) == expected_RLflux[ninds + 1] @@ -305,7 +306,8 @@ end @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - @testset "QR/QL block sparse on MPO tensor with all possible collections on Q,R" for ninds in [ + @testset "QR/QL block sparse on MPO tensor with all possible collections on Q,R" for ninds in + [ 0, 1, 2, 3, 4 ] expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] @@ -358,7 +360,6 @@ end @test min(diag(L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - end @testset "QR/QL block sparse with positive R" begin From 462ffbfb0cc9dbd5733ef76f1f94fde6822e5afa Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 17:07:05 -0600 Subject: [PATCH 45/90] Stri[ out extra indices in Linds, not in A. Need this to pass som eunit tests in test/base/test_itensor.jl --- src/tensor_operations/matrix_decomposition.jl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index ff1fdddbb0..0519dd95d4 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -438,6 +438,10 @@ end # Generic function implementing both qr and ql decomposition. The X tensor = R or L. # function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) + # Strip out any extra indices that are not in A. + # Unit test test/base/test_itensor.jl line 1469 will fail without this. + Linds=commoninds(A,Linds) + #Rinds=commoninds(A,Rinds) #if the user supplied Rinds they could have the same problem? # # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never # be empty. A essentially becomes 1D after collection. 
From 9f80aa27da57b5c7f19f754d69aacbe23b46ce86 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 17:23:55 -0600 Subject: [PATCH 46/90] Move Heisenberg and Hubbards MPO QR tests over legacy area. And format --- src/tensor_operations/matrix_decomposition.jl | 2 +- test/ITensorLegacyMPS/base/test_qnmpo.jl | 80 ++++++++++++++++++ test/base/test_decomp.jl | 81 ------------------- 3 files changed, 81 insertions(+), 82 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 0519dd95d4..f8be4b0a3c 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -440,7 +440,7 @@ end function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) # Strip out any extra indices that are not in A. # Unit test test/base/test_itensor.jl line 1469 will fail without this. - Linds=commoninds(A,Linds) + Linds = commoninds(A, Linds) #Rinds=commoninds(A,Rinds) #if the user supplied Rinds they could have the same problem? # # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never diff --git a/test/ITensorLegacyMPS/base/test_qnmpo.jl b/test/ITensorLegacyMPS/base/test_qnmpo.jl index fb05a24aa7..d0c542cf54 100644 --- a/test/ITensorLegacyMPS/base/test_qnmpo.jl +++ b/test/ITensorLegacyMPS/base/test_qnmpo.jl @@ -293,4 +293,84 @@ end end end +# +# Build up Hamiltonians with non trival QN spaces in the link indices and further neighbour interactions. +# +function make_Heisenberg_AutoMPO(sites, NNN::Int64; J::Float64=1.0, kwargs...)::MPO + N = length(sites) + @assert N >= NNN + ampo = OpSum() + for dj in 1:NNN + f = J / dj + for j in 1:(N - dj) + add!(ampo, f, "Sz", j, "Sz", j + dj) + add!(ampo, f * 0.5, "S+", j, "S-", j + dj) + add!(ampo, f * 0.5, "S-", j, "S+", j + dj) + end + end + return MPO(ampo, sites; kwargs...) +end + +function make_Hubbard_AutoMPO( + sites, NNN::Int64; U::Float64=1.0, t::Float64=1.0, V::Float64=0.5, kwargs... +)::MPO + N = length(sites) + @assert(N >= NNN) + os = OpSum() + for i in 1:N + os += (U, "Nupdn", i) + end + for dn in 1:NNN + tj, Vj = t / dn, V / dn + for n in 1:(N - dn) + os += -tj, "Cdagup", n, "Cup", n + dn + os += -tj, "Cdagup", n + dn, "Cup", n + os += -tj, "Cdagdn", n, "Cdn", n + dn + os += -tj, "Cdagdn", n + dn, "Cdn", n + os += Vj, "Ntot", n, "Ntot", n + dn + end + end + return MPO(os, sites; kwargs...) +end + +test_combos = [(make_Heisenberg_AutoMPO, "S=1/2"), (make_Hubbard_AutoMPO, "Electron")] + +@testset "QR/QL MPO tensors with complex block structures, H=$(test_combo[1])" for test_combo in + test_combos + N, NNN = 10, 7 #10 lattice site, up 7th neight interactions + sites = siteinds(test_combo[2], N; conserve_qns=true) + H = test_combo[1](sites, NNN) + for n in 1:N-1 + W = H[n] + @test flux(W) == QN("Sz", 0) + ilr = filterinds(W; tags="l=$n")[1] + ilq = noncommoninds(W, ilr) + Q, R, q = qr(W, ilq) + @test flux(Q) == QN("Sz", 0) #qr should move all flux on W (0 in this case) onto R + @test flux(R) == QN("Sz", 0) #this effectively removes all flux between Q and R in thie case. + @test W ≈ Q * R atol = 1e-13 + # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. + # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
+ # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + R, Q, q = ITensors.rq(W, ilr) + @test flux(Q) == QN("Sz", 0) + @test flux(R) == QN("Sz", 0) + @test W ≈ Q * R atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + Q, L, q = ITensors.ql(W, ilq) + @test flux(Q) == QN("Sz", 0) + @test flux(L) == QN("Sz", 0) + @test W ≈ Q * L atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + L, Q, q = ITensors.lq(W, ilr) + @test flux(Q) == QN("Sz", 0) + @test flux(L) == QN("Sz", 0) + @test W ≈ Q * L atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + end +end nothing diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 584f64d653..6c4a7e702f 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -67,46 +67,6 @@ function is_upper(A::ITensor, r::Index)::Bool end is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) -# -# Build up Hamiltonians with non trival QN spaces in the link indices and further neighbour interactions. -# -function make_Heisenberg_AutoMPO(sites, NNN::Int64; J::Float64=1.0, kwargs...)::MPO - N = length(sites) - @assert N >= NNN - ampo = OpSum() - for dj in 1:NNN - f = J / dj - for j in 1:(N - dj) - add!(ampo, f, "Sz", j, "Sz", j + dj) - add!(ampo, f * 0.5, "S+", j, "S-", j + dj) - add!(ampo, f * 0.5, "S-", j, "S+", j + dj) - end - end - return MPO(ampo, sites; kwargs...) -end - -function make_Hubbard_AutoMPO( - sites, NNN::Int64; U::Float64=1.0, t::Float64=1.0, V::Float64=0.5, kwargs... -)::MPO - N = length(sites) - @assert(N >= NNN) - os = OpSum() - for i in 1:N - os += (U, "Nupdn", i) - end - for dn in 1:NNN - tj, Vj = t / dn, V / dn - for n in 1:(N - dn) - os += -tj, "Cdagup", n, "Cup", n + dn - os += -tj, "Cdagup", n + dn, "Cup", n - os += -tj, "Cdagdn", n, "Cdn", n + dn - os += -tj, "Cdagdn", n + dn, "Cdn", n - os += Vj, "Ntot", n, "Ntot", n + dn - end - end - return MPO(os, sites; kwargs...) -end - @testset "ITensor Decompositions" begin @testset "truncate!" begin a = [0.1, 0.01, 1e-13] @@ -375,47 +335,6 @@ end @test A ≈ Q * L atol = 1e-13 end - test_combos = [(make_Heisenberg_AutoMPO, "S=1/2"), (make_Hubbard_AutoMPO, "Electron")] - - @testset "QR MPO tensors with complex block structures, H=$(test_combo[1])" for test_combo in - test_combos - N, NNN = 10, 2 #10 lattice site, up 7th neight interactions - sites = siteinds(test_combo[2], N; conserve_qns=true) - H = test_combo[1](sites, NNN) - for n in 5:5 - W = H[n] - @test flux(W) == QN("Sz", 0) - ilr = filterinds(W; tags="l=$n")[1] - ilq = noncommoninds(W, ilr) - Q, R, q = qr(W, ilq) - @test flux(Q) == QN("Sz", 0) #qr should move all flux on W (0 in this case) onto R - @test flux(R) == QN("Sz", 0) #this effectively removes all flux between Q and R in thie case. - @test W ≈ Q * R atol = 1e-13 - # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. - # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
- # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - R, Q, q = ITensors.rq(W, ilr) - @test flux(Q) == QN("Sz", 0) - @test flux(R) == QN("Sz", 0) - @test W ≈ Q * R atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - Q, L, q = ITensors.ql(W, ilq) - @test flux(Q) == QN("Sz", 0) - @test flux(L) == QN("Sz", 0) - @test W ≈ Q * L atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - L, Q, q = ITensors.lq(W, ilr) - @test flux(Q) == QN("Sz", 0) - @test flux(L) == QN("Sz", 0) - @test W ≈ Q * L atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - end - end - @testset "factorize with QR" begin l = Index(5, "l") s = Index(2, "s") From f931cbe1c17c23d2bd22027bc3d03b125beafb79 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 17:49:50 -0600 Subject: [PATCH 47/90] Format --- test/ITensorLegacyMPS/base/test_qnmpo.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ITensorLegacyMPS/base/test_qnmpo.jl b/test/ITensorLegacyMPS/base/test_qnmpo.jl index d0c542cf54..9f3a487f0b 100644 --- a/test/ITensorLegacyMPS/base/test_qnmpo.jl +++ b/test/ITensorLegacyMPS/base/test_qnmpo.jl @@ -340,7 +340,7 @@ test_combos = [(make_Heisenberg_AutoMPO, "S=1/2"), (make_Hubbard_AutoMPO, "Elect N, NNN = 10, 7 #10 lattice site, up 7th neight interactions sites = siteinds(test_combo[2], N; conserve_qns=true) H = test_combo[1](sites, NNN) - for n in 1:N-1 + for n in 1:(N - 1) W = H[n] @test flux(W) == QN("Sz", 0) ilr = filterinds(W; tags="l=$n")[1] From a31fb5c98dd70170108e0b1d1f3bde5299b9fcd1 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 17:07:05 -0600 Subject: [PATCH 48/90] Strip out extra indices in Linds, not in A. Need this to pass som eunit tests in test/base/test_itensor.jl --- src/tensor_operations/matrix_decomposition.jl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index ff1fdddbb0..0519dd95d4 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -438,6 +438,10 @@ end # Generic function implementing both qr and ql decomposition. The X tensor = R or L. # function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) + # Strip out any extra indices that are not in A. + # Unit test test/base/test_itensor.jl line 1469 will fail without this. + Linds=commoninds(A,Linds) + #Rinds=commoninds(A,Rinds) #if the user supplied Rinds they could have the same problem? # # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never # be empty. A essentially becomes 1D after collection. From 492443c875c48e2bd1b5a81ff911ba2196fcb8b5 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 17:23:55 -0600 Subject: [PATCH 49/90] Move Heisenberg and Hubbards MPO QR tests over legacy area. 
And format --- src/tensor_operations/matrix_decomposition.jl | 2 +- test/ITensorLegacyMPS/base/test_qnmpo.jl | 80 ++++++++++++++++++ test/base/test_decomp.jl | 81 ------------------- 3 files changed, 81 insertions(+), 82 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 0519dd95d4..f8be4b0a3c 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -440,7 +440,7 @@ end function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) # Strip out any extra indices that are not in A. # Unit test test/base/test_itensor.jl line 1469 will fail without this. - Linds=commoninds(A,Linds) + Linds = commoninds(A, Linds) #Rinds=commoninds(A,Rinds) #if the user supplied Rinds they could have the same problem? # # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never diff --git a/test/ITensorLegacyMPS/base/test_qnmpo.jl b/test/ITensorLegacyMPS/base/test_qnmpo.jl index fb05a24aa7..d0c542cf54 100644 --- a/test/ITensorLegacyMPS/base/test_qnmpo.jl +++ b/test/ITensorLegacyMPS/base/test_qnmpo.jl @@ -293,4 +293,84 @@ end end end +# +# Build up Hamiltonians with non trival QN spaces in the link indices and further neighbour interactions. +# +function make_Heisenberg_AutoMPO(sites, NNN::Int64; J::Float64=1.0, kwargs...)::MPO + N = length(sites) + @assert N >= NNN + ampo = OpSum() + for dj in 1:NNN + f = J / dj + for j in 1:(N - dj) + add!(ampo, f, "Sz", j, "Sz", j + dj) + add!(ampo, f * 0.5, "S+", j, "S-", j + dj) + add!(ampo, f * 0.5, "S-", j, "S+", j + dj) + end + end + return MPO(ampo, sites; kwargs...) +end + +function make_Hubbard_AutoMPO( + sites, NNN::Int64; U::Float64=1.0, t::Float64=1.0, V::Float64=0.5, kwargs... +)::MPO + N = length(sites) + @assert(N >= NNN) + os = OpSum() + for i in 1:N + os += (U, "Nupdn", i) + end + for dn in 1:NNN + tj, Vj = t / dn, V / dn + for n in 1:(N - dn) + os += -tj, "Cdagup", n, "Cup", n + dn + os += -tj, "Cdagup", n + dn, "Cup", n + os += -tj, "Cdagdn", n, "Cdn", n + dn + os += -tj, "Cdagdn", n + dn, "Cdn", n + os += Vj, "Ntot", n, "Ntot", n + dn + end + end + return MPO(os, sites; kwargs...) +end + +test_combos = [(make_Heisenberg_AutoMPO, "S=1/2"), (make_Hubbard_AutoMPO, "Electron")] + +@testset "QR/QL MPO tensors with complex block structures, H=$(test_combo[1])" for test_combo in + test_combos + N, NNN = 10, 7 #10 lattice site, up 7th neight interactions + sites = siteinds(test_combo[2], N; conserve_qns=true) + H = test_combo[1](sites, NNN) + for n in 1:N-1 + W = H[n] + @test flux(W) == QN("Sz", 0) + ilr = filterinds(W; tags="l=$n")[1] + ilq = noncommoninds(W, ilr) + Q, R, q = qr(W, ilq) + @test flux(Q) == QN("Sz", 0) #qr should move all flux on W (0 in this case) onto R + @test flux(R) == QN("Sz", 0) #this effectively removes all flux between Q and R in thie case. + @test W ≈ Q * R atol = 1e-13 + # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. + # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
+ # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + R, Q, q = ITensors.rq(W, ilr) + @test flux(Q) == QN("Sz", 0) + @test flux(R) == QN("Sz", 0) + @test W ≈ Q * R atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + Q, L, q = ITensors.ql(W, ilq) + @test flux(Q) == QN("Sz", 0) + @test flux(L) == QN("Sz", 0) + @test W ≈ Q * L atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + + L, Q, q = ITensors.lq(W, ilr) + @test flux(Q) == QN("Sz", 0) + @test flux(L) == QN("Sz", 0) + @test W ≈ Q * L atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + end +end nothing diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 584f64d653..6c4a7e702f 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -67,46 +67,6 @@ function is_upper(A::ITensor, r::Index)::Bool end is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) -# -# Build up Hamiltonians with non trival QN spaces in the link indices and further neighbour interactions. -# -function make_Heisenberg_AutoMPO(sites, NNN::Int64; J::Float64=1.0, kwargs...)::MPO - N = length(sites) - @assert N >= NNN - ampo = OpSum() - for dj in 1:NNN - f = J / dj - for j in 1:(N - dj) - add!(ampo, f, "Sz", j, "Sz", j + dj) - add!(ampo, f * 0.5, "S+", j, "S-", j + dj) - add!(ampo, f * 0.5, "S-", j, "S+", j + dj) - end - end - return MPO(ampo, sites; kwargs...) -end - -function make_Hubbard_AutoMPO( - sites, NNN::Int64; U::Float64=1.0, t::Float64=1.0, V::Float64=0.5, kwargs... -)::MPO - N = length(sites) - @assert(N >= NNN) - os = OpSum() - for i in 1:N - os += (U, "Nupdn", i) - end - for dn in 1:NNN - tj, Vj = t / dn, V / dn - for n in 1:(N - dn) - os += -tj, "Cdagup", n, "Cup", n + dn - os += -tj, "Cdagup", n + dn, "Cup", n - os += -tj, "Cdagdn", n, "Cdn", n + dn - os += -tj, "Cdagdn", n + dn, "Cdn", n - os += Vj, "Ntot", n, "Ntot", n + dn - end - end - return MPO(os, sites; kwargs...) -end - @testset "ITensor Decompositions" begin @testset "truncate!" begin a = [0.1, 0.01, 1e-13] @@ -375,47 +335,6 @@ end @test A ≈ Q * L atol = 1e-13 end - test_combos = [(make_Heisenberg_AutoMPO, "S=1/2"), (make_Hubbard_AutoMPO, "Electron")] - - @testset "QR MPO tensors with complex block structures, H=$(test_combo[1])" for test_combo in - test_combos - N, NNN = 10, 2 #10 lattice site, up 7th neight interactions - sites = siteinds(test_combo[2], N; conserve_qns=true) - H = test_combo[1](sites, NNN) - for n in 5:5 - W = H[n] - @test flux(W) == QN("Sz", 0) - ilr = filterinds(W; tags="l=$n")[1] - ilq = noncommoninds(W, ilr) - Q, R, q = qr(W, ilq) - @test flux(Q) == QN("Sz", 0) #qr should move all flux on W (0 in this case) onto R - @test flux(R) == QN("Sz", 0) #this effectively removes all flux between Q and R in thie case. - @test W ≈ Q * R atol = 1e-13 - # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. - # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
- # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - R, Q, q = ITensors.rq(W, ilr) - @test flux(Q) == QN("Sz", 0) - @test flux(R) == QN("Sz", 0) - @test W ≈ Q * R atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - Q, L, q = ITensors.ql(W, ilq) - @test flux(Q) == QN("Sz", 0) - @test flux(L) == QN("Sz", 0) - @test W ≈ Q * L atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - L, Q, q = ITensors.lq(W, ilr) - @test flux(Q) == QN("Sz", 0) - @test flux(L) == QN("Sz", 0) - @test W ≈ Q * L atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - end - end - @testset "factorize with QR" begin l = Index(5, "l") s = Index(2, "s") From 35e560f1c1d93f8ec847c2f50aad4d8726d43cd9 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 17:49:50 -0600 Subject: [PATCH 50/90] Format --- test/ITensorLegacyMPS/base/test_qnmpo.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ITensorLegacyMPS/base/test_qnmpo.jl b/test/ITensorLegacyMPS/base/test_qnmpo.jl index d0c542cf54..9f3a487f0b 100644 --- a/test/ITensorLegacyMPS/base/test_qnmpo.jl +++ b/test/ITensorLegacyMPS/base/test_qnmpo.jl @@ -340,7 +340,7 @@ test_combos = [(make_Heisenberg_AutoMPO, "S=1/2"), (make_Hubbard_AutoMPO, "Elect N, NNN = 10, 7 #10 lattice site, up 7th neight interactions sites = siteinds(test_combo[2], N; conserve_qns=true) H = test_combo[1](sites, NNN) - for n in 1:N-1 + for n in 1:(N - 1) W = H[n] @test flux(W) == QN("Sz", 0) ilr = filterinds(W; tags="l=$n")[1] From 9128de9f6c4a3c4a034577695d77b184961ced69 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 7 Mar 2023 18:08:35 -0600 Subject: [PATCH 51/90] NDTensors unit, switch rq to ql decomp. --- NDTensors/test/linearalgebra.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 4777ef21b8..c86daf70ce 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -28,12 +28,12 @@ end @test A ≈ Q * R atol = 1e-13 @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-13 end -@testset "Dense RQ decomposition" begin +@testset "Dense LQ decomposition" begin n, m = 4, 8 nm = min(n, m) A = randomTensor(n, m) - R, Q = rq(A) - @test A ≈ R * Q atol = 1e-13 + Q, L = ql(A) + @test A ≈ Q * L atol = 1e-13 @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, nm)) atol = 1e-13 end From cb1f0d1e387686443bdac3cc0b24071a23252f70 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 8 Mar 2023 09:07:44 -0600 Subject: [PATCH 52/90] Finish merge and get all decomp unit test working. 
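# [Editor's note] This patch also threads an rr_cutoff keyword into the dense
# qx path: rows of R (or L) that are zero to within the cutoff are discarded
# together with the matching columns of Q, shrinking the new Q--X link index
# down to the numerical rank. A standalone demo of that trim step on a rank-1
# matrix (plain LinearAlgebra; the `keep` mask mirrors what the
# find_zero_rows/trim_rows helpers below compute):
using LinearAlgebra
A = randn(6, 1) * randn(1, 5)                 # 6x5 but rank 1
F = qr(A)
Q, R = Matrix(F.Q), F.R
keep = [maximum(abs.(R[r, :])) > 1e-12 for r in 1:size(R, 1)]
Q1, R1 = Q[:, keep], R[keep, :]
@assert Q1 * R1 ≈ A                           # product unchanged ...
@assert size(R1, 1) == 1                      # ... with link dimension = rank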
--- NDTensors/src/linearalgebra.jl | 62 +++++---------- src/imports.jl | 1 - test/base/test_decomp.jl | 138 +++++++++++++++++++++++---------- 3 files changed, 116 insertions(+), 85 deletions(-) diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 3bba481b58..8929f8af99 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -369,16 +369,18 @@ end # # QR rank reduction helpers # -function find_zero_rows(R::AbstractMatrix, rr_cutoff::Float64)::Array{Bool} where {ElT,IndsT} +function find_zero_rows(R::AbstractMatrix, rr_cutoff::Float64)::Array{Bool} nr, nc = size(R) - return map((r)->(maximum(abs.(R[r,1:nc])) <= rr_cutoff),1:nr ) + return map((r) -> (maximum(abs.(R[r, 1:nc])) <= rr_cutoff), 1:nr) end # # Trim out zero rows of R within tolerance rr_cutoff. Also trim the corresponding columns # of Q. # -function trim_rows(R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64) where {ElT,IndsT} +function trim_rows( + R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64 +) where {ElT,IndsT} zeros = find_zero_rows(R, rr_cutoff) num_zero_rows = sum(zeros) if num_zero_rows == 0 @@ -387,7 +389,7 @@ function trim_rows(R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64) whe #@printf "Rank Reveal removing %4i rows with rr_cutoff=%.1e\n" num_zero_rows rr_cutoff Rnr, Rnc = size(R) Qnr, Qnc = size(Q) - #@assert Rnr==Qnc Q is strided to we can't asume this + #@assert Rnr==Qnc Q is strided so we can't asume this R1nr = Rnr - num_zero_rows T = eltype(R) R1 = Matrix{T}(undef, R1nr, Rnc) @@ -397,46 +399,12 @@ function trim_rows(R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64) whe if zeros[r] == false R1[r1, :] = R[r, :] #transfer row Q1[:, r1] = Q[:, r] #transfer column - r1 += 1 #next row in rank reduces matrices. + r1 += 1 #next row in rank reduced matrices. end #if zero end #for r return R1, Q1 end -# -# Trim out zero columnss of R within tolerance rr_cutoff. Also trim the corresponding rows -# of Q. -# -function trim_columns(R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64) where {ElT,IndsT} - R, Q = trim_rows(transpose(R), transpose(Q), rr_cutoff) - return transpose(R), transpose(Q) -end -function qr(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} - positive = get(kwargs, :positive, false) - # TODO: just call qr on T directly (make sure - # that is fast) - if positive - QM, RM = qr_positive(matrix(T)) - else - QM, RM = qr(matrix(T)) - end - # - # Do row removal for rank revealing RQ - # - rr_cutoff::Float64 = get(kwargs, :rr_cutoff, -1.0) - if rr_cutoff >= 0.0 - RM, QM = trim_rows(RM, QM, rr_cutoff) - end - # - # Make the new indices to go onto Q and R - # - IndexT = IndsT.parameters[1] - nq = IndexT(size(RM)[1]) #dim of the link index - Qinds = IndsT((ind(T, 1), nq)) - Rinds = IndsT((nq, ind(T, 2))) - Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided - R = tensor(Dense(vec(RM)), Rinds) - return Q, R function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} qxf = positive ? qr_positive : qr return qx(qxf, T; kwargs...) @@ -450,11 +418,19 @@ end # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. # -function qx(qx::Function, T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} +function qx(qx::Function, T::DenseTensor{ElT,2,IndsT}; rr_cutoff=-1.0, kwargs...) where {ElT,IndsT} QM, XM = qx(matrix(T)) - # Make the new indices to go onto Q and R - q, r = inds(T) - q = dim(q) < dim(r) ? 
sim(q) : sim(r) + # + # Do row removal for rank revealing RQ + # + if rr_cutoff >= 0.0 + XM, QM = trim_rows(XM, QM, rr_cutoff) + end + # + # Make the new indices to go onto Q and X + # + IndexT = IndsT.parameters[1] #establish the index type. + q = IndexT(size(XM)[1]) #create the Q--X link index. Qinds = IndsT((ind(T, 1), q)) Xinds = IndsT((q, ind(T, 2))) Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided diff --git a/src/imports.jl b/src/imports.jl index 203923c6b2..09fc2465f5 100644 --- a/src/imports.jl +++ b/src/imports.jl @@ -125,7 +125,6 @@ using ITensors.NDTensors: eachdiagblock, fill!!, randn!!, - rq, single_precision, timer diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 6c4a7e702f..3e27528583 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -67,54 +67,78 @@ function is_upper(A::ITensor, r::Index)::Bool end is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) -@testset "ITensor Decompositions" begin - @testset "truncate!" begin - a = [0.1, 0.01, 1e-13] - @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) == - (1e-13, (0.01 + 1e-13) / 2) - @test length(a) == 2 - - # Negative definite spectrum treated by taking - # square (if singular values) or absolute values - a = [-0.12, -0.1] - @test NDTensors.truncate!(a) == (0.0, 0.0) - @test length(a) == 2 - - a = [-0.1, -0.01, -1e-13] - @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) == - (1e-13, (0.01 + 1e-13) / 2) - @test length(a) == 2 +# +# Makes all columns lineary depenedent but scaled differently. +# +function rank_fix(A::ITensor, Linds...) + Lis = commoninds(A, (Linds...)) + Ris = uniqueinds(A, Lis) + # + # Use combiners to render A down to a rank 2 tensor ready matrix QR routine. + # + CL, CR = combiner(Lis...), combiner(Ris...) + cL, cR = combinedind(CL), combinedind(CR) + AC = A * CR * CL + if inds(AC) != IndexSet(cL, cR) + AC = permute(AC, cL, cR) end - - @testset "factorize" begin - i = Index(2, "i") - j = Index(2, "j") - A = randomITensor(i, j) - @test_throws ErrorException factorize(A, i; dir="left") - @test_throws ErrorException factorize(A, i; ortho="fakedir") + At = tensor(AC) + nc = dim(At, 2) + @assert nc >= 2 + for c in 2:nc + At[:, c] = At[:, 1] * 1.05^c end + return itensor(At) * dag(CL) * dag(CR) +end - @testset "factorize with eigen_perturbation" begin - l = Index(4, "l") - s1 = Index(2, "s1") - s2 = Index(2, "s2") - r = Index(4, "r") +@testset "ITensor Decompositions" begin + # @testset "truncate!" 
begin + # a = [0.1, 0.01, 1e-13] + # @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) == + # (1e-13, (0.01 + 1e-13) / 2) + # @test length(a) == 2 + + # # Negative definite spectrum treated by taking + # # square (if singular values) or absolute values + # a = [-0.12, -0.1] + # @test NDTensors.truncate!(a) == (0.0, 0.0) + # @test length(a) == 2 + + # a = [-0.1, -0.01, -1e-13] + # @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) == + # (1e-13, (0.01 + 1e-13) / 2) + # @test length(a) == 2 + # end - phi = randomITensor(l, s1, s2, r) + # @testset "factorize" begin + # i = Index(2, "i") + # j = Index(2, "j") + # A = randomITensor(i, j) + # @test_throws ErrorException factorize(A, i; dir="left") + # @test_throws ErrorException factorize(A, i; ortho="fakedir") + # end - drho = randomITensor(l', s1', l, s1) - drho += swapprime(drho, 0, 1) - drho .*= 1E-5 + # @testset "factorize with eigen_perturbation" begin + # l = Index(4, "l") + # s1 = Index(2, "s1") + # s2 = Index(2, "s2") + # r = Index(4, "r") - U, B = factorize(phi, (l, s1); ortho="left", eigen_perturbation=drho) - @test norm(U * B - phi) < 1E-5 + # phi = randomITensor(l, s1, s2, r) - # Not allowed to use eigen_perturbation with which_decomp - # other than "automatic" or "eigen": - @test_throws ErrorException factorize( - phi, (l, s1); ortho="left", eigen_perturbation=drho, which_decomp="svd" - ) - end + # drho = randomITensor(l', s1', l, s1) + # drho += swapprime(drho, 0, 1) + # drho .*= 1E-5 + + # U, B = factorize(phi, (l, s1); ortho="left", eigen_perturbation=drho) + # @test norm(U * B - phi) < 1E-5 + + # # Not allowed to use eigen_perturbation with which_decomp + # # other than "automatic" or "eigen": + # @test_throws ErrorException factorize( + # phi, (l, s1); ortho="left", eigen_perturbation=drho, which_decomp="svd" + # ) + # end @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in [ @@ -335,6 +359,38 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) @test A ≈ Q * L atol = 1e-13 end + @testset "Rank revealing QR/RQ/QL/LQ decomp on MPS dense $elt tensor" for ninds in + [1,2,3], + elt in [Float64, ComplexF64] + + l = Index(5, "l") + s = Index(2, "s") + r = Index(5, "r") + A = randomITensor(elt, l, s, s', r) + + Ainds = inds(A) + A = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. 
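+    # A now has matrix rank 1 by construction, so each rank-revealing call below should shrink the new link index to dim(q) == 1.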
+ Q, R, q = qr(A, Ainds[1:ninds]; rr_cutoff=1e-12) + @test dim(q) == 1 #check that we found rank==1 + @test A ≈ Q * R atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + R, Q, q = rq(A, Ainds[1:ninds]; rr_cutoff=1e-12) + @test dim(q) == 1 #check that we found rank==1 + @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + L, Q, q = lq(A, Ainds[1:ninds]; rr_cutoff=1e-12) + @test dim(q) == 1 #check that we found rank==1 + @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + + Q, L, q = ql(A, Ainds[1:ninds]; rr_cutoff=1e-12) + @test dim(q) == 1 #check that we found rank==1 + @test A ≈ Q * L atol = 1e-13 + @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + end + @testset "factorize with QR" begin l = Index(5, "l") s = Index(2, "s") From 32972cc485900b79f07f719c933f73412dda4fba Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 8 Mar 2023 10:07:21 -0600 Subject: [PATCH 53/90] Use more likely to pass a String rather than a tagset Take away TagSet restiction on arguments for xq() function Add some test to confirm the users tags are getting used. --- src/tensor_operations/matrix_decomposition.jl | 2 +- test/base/test_decomp.jl | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index f8be4b0a3c..8e6fe982b6 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -482,7 +482,7 @@ end # with swapping the left and right indices. The X tensor = R or L. # function xq( - qx::Function, qtags::TagSet, A::ITensor, Linds::Indices, Rinds::Indices; kwargs... + qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs... ) Q, X, q = qx(A, Rinds, Linds; kwargs...) # diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 6c4a7e702f..639417c550 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -156,13 +156,13 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) if (length(inds(R)) > 1) @test is_upper(R, q) #specify the right index end - R1, Q1, q1 = rq(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. + R1, Q1, q1 = rq(A, Linds, Rinds; tags="Link,myrq") #make sure the same call with both L & R indices give the same answer. Q1 = replaceind(Q1, q1, q) R1 = replaceind(R1, q1, q) @test norm(Q - Q1) == 0.0 @test norm(R - R1) == 0.0 - # @test hastags(q, "myrq") - # @test hastags(q, "Link") + @test hastags(q1, "myrq") + @test hastags(q1, "Link") L, Q, q = lq(A, Linds) @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index. @@ -174,11 +174,13 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) if (length(inds(L)) > 1) @test is_lower(L, q) #specify the right index end - L1, Q1, q1 = lq(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. + L1, Q1, q1 = lq(A, Linds, Rinds; tags="Link,mylq") #make sure the same call with both L & R indices give the same answer. Q1 = replaceind(Q1, q1, q) L1 = replaceind(L1, q1, q) @test norm(Q - Q1) == 0.0 @test norm(L - L1) == 0.0 + @test hastags(q1, "mylq") + @test hastags(q1, "Link") Q, L, q = ql(A, Linds) @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. 
@@ -190,11 +192,13 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) if (length(inds(L)) > 1) @test is_lower(q, L) #specify the right index end - Q1, L1, q1 = ql(A, Linds, Rinds) #make sure the same call with both L & R indices give the same answer. + Q1, L1, q1 = ql(A, Linds, Rinds; tags="Link,myql") #make sure the same call with both L & R indices give the same answer. Q1 = replaceind(Q1, q1, q) L1 = replaceind(L1, q1, q) @test norm(Q - Q1) == 0.0 @test norm(L - L1) == 0.0 + @test hastags(q1, "myql") + @test hastags(q1, "Link") end @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [ From aa2991455ab1c29b377431885040c54ecad234e9 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 8 Mar 2023 10:08:34 -0600 Subject: [PATCH 54/90] Format --- src/tensor_operations/matrix_decomposition.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 8e6fe982b6..595da6c981 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -481,9 +481,7 @@ end # Generic function implementing both rq and lq decomposition. Implemented using qr/ql # with swapping the left and right indices. The X tensor = R or L. # -function xq( - qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs... -) +function xq(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) Q, X, q = qx(A, Rinds, Linds; kwargs...) # # fix up the tag name for the index between Q and L. From 858e070e6ac0936a251bc29f06ace2271f007cd9 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 8 Mar 2023 17:56:01 -0600 Subject: [PATCH 55/90] Fix bug in ql_positive routine Locating the diagonal in a lower tri matrix requires a little extra work. Add unit tests accordingly --- NDTensors/src/linearalgebra.jl | 4 +-- test/base/test_decomp.jl | 49 +++++++++++++++++++++++++--------- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index f144eb3760..a803ebabd1 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -428,9 +428,9 @@ matrix is unique. Returns a tuple (Q,L). function ql_positive(M::AbstractMatrix) sparseQ, L = ql(M) Q = convert(Matrix, sparseQ) - nr, nc = size(Q) + nr, nc = size(L) dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr - for c in 1:nc + for c in 1:(nc - dc) if c <= nr && real(L[c, c + dc]) < 0.0 L[c, 1:(c + dc)] *= -1 #only fip non-zero portion of the column. Q[:, c] *= -1 diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 639417c550..8901a3c90c 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -1,5 +1,7 @@ using ITensors, LinearAlgebra, Test +using Printf +Base.show(io::IO, f::Float64) = @printf(io, "%1.3f", f) # # Decide of rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. 
# @@ -67,6 +69,27 @@ function is_upper(A::ITensor, r::Index)::Bool end is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) +function diag_upper(l::Index, A::ITensor) + At = tensor(A * combiner(noncommoninds(A, l)...)) + if size(At) == (1,) + return At + end + @assert length(size(At)) == 2 + return diag(At) +end + +function diag_lower(l::Index, A::ITensor) + At = tensor(A * combiner(noncommoninds(A, l)...)) #render down ot order 2 + if size(At) == (1,) + return At + end + @assert length(size(At)) == 2 + nr, nc = size(At) + dc = Base.max(0, nc - nr) #diag starts dc+1 columns out from the left + At1 = At[:, (dc + 1):nc] #chop out the first dc columns + return diag(At1) #now we can use the stock diag function. +end + @testset "ITensor Decompositions" begin @testset "truncate!" begin a = [0.1, 0.01, 1e-13] @@ -302,26 +325,28 @@ is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 end - @testset "QR/QL/RQ/LQ dense with positive R" begin - l = Index(5, "l") - s = Index(2, "s") - r = Index(10, "r") + @testset "QR/QL/RQ/LQ dense with positive R" for ninds in [0, 1, 2, 3] + l = Index(3, "l") + s = Index(5, "s") + r = Index(7, "r") A = randomITensor(l, s, s', r) - Q, R, q = qr(A, l, s, s'; positive=true) - @test min(diag(R)...) > 0.0 + Ainds = inds(A) + + Q, R, q = qr(A, Ainds[1:ninds]; positive=true) + @test min(diag_upper(q, R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - Q, L, q = ITensors.ql(A, l, s, s'; positive=true) - @test min(diag(L)...) > 0.0 + Q, L, q = ITensors.ql(A, Ainds[1:ninds]; positive=true) + @test min(diag_lower(q, L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, r; positive=true) - @test min(diag(R)...) > 0.0 + R, Q, q = ITensors.rq(A, Ainds[1:ninds]; positive=true) + @test min(diag_lower(q, R)...) > 0.0 #transpose R is lower @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - L, Q, q = ITensors.lq(A, r; positive=true) - @test min(diag(L)...) > 0.0 + L, Q, q = ITensors.lq(A, Ainds[1:ninds]; positive=true) + @test min(diag_upper(q, L)...) > 0.0 #transpose L is upper @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 end From 5ee47bfbff7375b74ecefc0f0c6541b45a501b00 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 8 Mar 2023 20:37:08 -0600 Subject: [PATCH 56/90] Fix: UndefVarError: tensor not defined Have to remember run unit test in a fresh REPL. --- test/base/test_decomp.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 79a70b0688..b63fe97cb1 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -1,7 +1,7 @@ -using ITensors, LinearAlgebra, Test +using ITensors, LinearAlgebra, Test, NDTensors # -# Decide of rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. +# Decide if rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. 
# function is_upper(At::NDTensors.Tensor)::Bool nr, nc = dims(At) From 420241d81d574439fef65caf9671528380661d89 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 8 Mar 2023 21:19:55 -0600 Subject: [PATCH 57/90] Qualify tensor() --- test/base/test_decomp.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index b63fe97cb1..05cf2fabab 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -1,4 +1,4 @@ -using ITensors, LinearAlgebra, Test, NDTensors +using ITensors, LinearAlgebra, Test # # Decide if rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. @@ -68,7 +68,7 @@ end is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) function diag_upper(l::Index, A::ITensor) - At = tensor(A * combiner(noncommoninds(A, l)...)) + At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...)) if size(At) == (1,) return At end @@ -77,7 +77,7 @@ function diag_upper(l::Index, A::ITensor) end function diag_lower(l::Index, A::ITensor) - At = tensor(A * combiner(noncommoninds(A, l)...)) #render down ot order 2 + At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...)) #render down ot order 2 if size(At) == (1,) return At end From 5e72caa1b60e2a2ddda6597c325fe990c29d4d56 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Fri, 10 Mar 2023 10:51:24 -0600 Subject: [PATCH 58/90] Stop using Printf at the NDTensors level. --- NDTensors/src/linearalgebra.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl index 42ab2883d1..a3ce0ef6cc 100644 --- a/NDTensors/src/linearalgebra.jl +++ b/NDTensors/src/linearalgebra.jl @@ -386,7 +386,7 @@ function trim_rows( if num_zero_rows == 0 return R, Q end - #@printf "Rank Reveal removing %4i rows with rr_cutoff=%.1e\n" num_zero_rows rr_cutoff + #println("Rank Reveal removing $num_zero_rows rows with log10(rr_cutoff)=$(log10(rr_cutoff))") Rnr, Rnc = size(R) Qnr, Qnc = size(Q) #@assert Rnr==Qnc Q is strided so we can't asume this @@ -421,7 +421,7 @@ end function qx(qx::Function, T::DenseTensor{ElT,2,IndsT}; rr_cutoff=-1.0, kwargs...) 
where {ElT,IndsT} QM, XM = qx(matrix(T)) # - # Do row removal for rank revealing RQ + # Do row removal for rank revealing QR/QL # if rr_cutoff >= 0.0 XM, QM = trim_rows(XM, QM, rr_cutoff) From 2b10908a9442f93584f8af7a181f6833c6980745 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 11 Mar 2023 13:32:57 -0600 Subject: [PATCH 59/90] Enhance unit tests for qr/ql decomp Test for 32/64 bit float and complex types Test for positive/real diagonals on L/R Enhance unit tests for qr/ql decomp --- NDTensors/test/linearalgebra.jl | 50 ++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index c86daf70ce..252d8db621 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -20,21 +20,43 @@ end @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14 end -@testset "Dense QR decomposition" begin - n, m = 4, 8 - nm = min(n, m) - A = randomTensor(n, m) - Q, R = qr(A) - @test A ≈ Q * R atol = 1e-13 - @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-13 -end -@testset "Dense LQ decomposition" begin + +Base.eps(::Type{Complex{T}}) where T <: AbstractFloat = eps(T) + +@testset "Dense $qx decomposition, elt=$elt, positve=$positive" for qx in [qr,ql], elt in [ Float64, ComplexF64, Float32, ComplexF32], positive in [false,true] + eps=Base.eps(elt)*30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps. n, m = 4, 8 - nm = min(n, m) - A = randomTensor(n, m) - Q, L = ql(A) - @test A ≈ Q * L atol = 1e-13 - @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, nm)) atol = 1e-13 + Id=Diagonal(fill(1.0, min(n, m))) + # + # Wide matrix (more columns than rows) + # + A = randomTensor(elt, n, m) + Q, X = qx(A,positive=positive) #X is R or L. + @test A ≈ Q * X atol = eps + @test array(Q)' * array(Q) ≈ Id atol = eps + @test array(Q) * array(Q)' ≈ Id atol = eps + if positive + nr,nc=size(X) + dr=qx==ql ? Base.max(0,nc-nr) : 0 + diagX=diag(X[:,1+dr:end]) #location of diag(L) is shifted dr columns over the right. + @test all(real(diagX).>=0.0) + @test all(imag(diagX).==0.0) + end + # + # Tall matrix (more rows than cols) + # + A = randomTensor(elt, m, n) #Tall array + Q, X = qx(A,positive=positive) + @test A ≈ Q * X atol = eps + @test array(Q)' * array(Q) ≈ Id atol = eps + if positive + nr,nc=size(X) + dr=qx==ql ? Base.max(0,nc-nr) : 0 + diagX=diag(X[:,1+dr:end]) #location of diag(L) is shifted dr columns over the right. 
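+    # With positive=true the diagonal of X should come back real and non-negative, for real and complex elt alike.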
+ @test all(real(diagX).>=0.0) + @test all(imag(diagX).==0.0) + end end + nothing From 3b5072d092ecd9db6b03c4450cc90a53ae2c9829 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sat, 11 Mar 2023 20:46:38 -0600 Subject: [PATCH 60/90] Format --- NDTensors/test/linearalgebra.jl | 37 +++++++++++++++++---------------- test/base/test_decomp.jl | 20 +++++++++--------- 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 252d8db621..d530d56505 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -20,43 +20,44 @@ end @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14 end +Base.eps(::Type{Complex{T}}) where {T<:AbstractFloat} = eps(T) -Base.eps(::Type{Complex{T}}) where T <: AbstractFloat = eps(T) +@testset "Dense $qx decomposition, elt=$elt, positve=$positive" for qx in [qr, ql], + elt in [Float64, ComplexF64, Float32, ComplexF32], + positive in [false, true] -@testset "Dense $qx decomposition, elt=$elt, positve=$positive" for qx in [qr,ql], elt in [ Float64, ComplexF64, Float32, ComplexF32], positive in [false,true] - eps=Base.eps(elt)*30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps. + eps = Base.eps(elt) * 30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps. n, m = 4, 8 - Id=Diagonal(fill(1.0, min(n, m))) + Id = Diagonal(fill(1.0, min(n, m))) # # Wide matrix (more columns than rows) # A = randomTensor(elt, n, m) - Q, X = qx(A,positive=positive) #X is R or L. + Q, X = qx(A; positive=positive) #X is R or L. @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Id atol = eps @test array(Q) * array(Q)' ≈ Id atol = eps - if positive - nr,nc=size(X) - dr=qx==ql ? Base.max(0,nc-nr) : 0 - diagX=diag(X[:,1+dr:end]) #location of diag(L) is shifted dr columns over the right. - @test all(real(diagX).>=0.0) - @test all(imag(diagX).==0.0) + if positive + nr, nc = size(X) + dr = qx == ql ? Base.max(0, nc - nr) : 0 + diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. + @test all(real(diagX) .>= 0.0) + @test all(imag(diagX) .== 0.0) end # # Tall matrix (more rows than cols) # A = randomTensor(elt, m, n) #Tall array - Q, X = qx(A,positive=positive) + Q, X = qx(A; positive=positive) @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Id atol = eps if positive - nr,nc=size(X) - dr=qx==ql ? Base.max(0,nc-nr) : 0 - diagX=diag(X[:,1+dr:end]) #location of diag(L) is shifted dr columns over the right. - @test all(real(diagX).>=0.0) - @test all(imag(diagX).==0.0) + nr, nc = size(X) + dr = qx == ql ? Base.max(0, nc - nr) : 0 + diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. + @test all(real(diagX) .>= 0.0) + @test all(imag(diagX) .== 0.0) end end - nothing diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 05cf2fabab..68601f35a1 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -150,13 +150,13 @@ end Ainds = inds(A) Linds = Ainds[1:ninds] Rinds = uniqueinds(A, Linds...) - Q, R, q = qr(A, Linds; tags="Link,qr1") #calling qr(A) triggers not supported error. + Q, R, q = qr(A, Linds) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. 
@test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test q == commonind(Q, R) - @test hastags(q, "qr1") + @test hastags(q, "Link,qr") if (length(inds(R)) > 1) @test is_upper(q, R) #specify the left index end @@ -236,7 +236,7 @@ end @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) + R, Q, q = rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -266,7 +266,7 @@ end # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - Q, L, q = ITensors.ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(L)) == 3 - ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -274,7 +274,7 @@ end @test A ≈ Q * L atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -282,7 +282,7 @@ end @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - L, Q, q = ITensors.lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + L, Q, q = lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(L)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -334,16 +334,16 @@ end @test min(diag_upper(q, R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - Q, L, q = ITensors.ql(A, Ainds[1:ninds]; positive=true) + Q, L, q = ql(A, Ainds[1:ninds]; positive=true) @test min(diag_lower(q, L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]; positive=true) + R, Q, q = rq(A, Ainds[1:ninds]; positive=true) @test min(diag_lower(q, R)...) > 0.0 #transpose R is lower @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - L, Q, q = ITensors.lq(A, Ainds[1:ninds]; positive=true) + L, Q, q = lq(A, Ainds[1:ninds]; positive=true) @test min(diag_upper(q, L)...) > 0.0 #transpose L is upper @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @@ -357,7 +357,7 @@ end Q, R, q = qr(A, l, s, dag(s'); positive=true) @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 - Q, L, q = ITensors.ql(A, l, s, dag(s'); positive=true) + Q, L, q = ql(A, l, s, dag(s'); positive=true) @test min(diag(L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 end From 59542df334414448e4b0df841869be07f144822a Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Sun, 12 Mar 2023 11:06:04 -0600 Subject: [PATCH 61/90] Don't assume lapack qr/ql returns reals on the R/L diagonals Use a more general complex phase change, instead of the sign change. 
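A worked scalar example of the phase argument (illustrative only, not part of the diff):

z = 3.0 - 4.0im
phase = sign(z)                   # for complex z this is z / abs(z), here 0.6 - 0.8im
@assert abs(phase) ≈ 1.0          # unit modulus, so it cancels against its conjugate
@assert conj(phase) * z ≈ abs(z)  # rotating by the conjugate phase gives a real, non-negative value

Scaling row c of R by conj(sign(R[c, c])) and column c of Q by sign(R[c, c]) leaves Q*R
unchanged while making the diagonal entry real and non-negative; for real matrices
sign() is just ±1, so the old sign-flip behaviour is recovered as a special case.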
---
 NDTensors/src/linearalgebra.jl | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl
index a803ebabd1..f50847be5c 100644
--- a/NDTensors/src/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra.jl
@@ -409,9 +409,10 @@ function qr_positive(M::AbstractMatrix)
   Q = convert(Matrix, sparseQ)
   nc = size(Q, 2)
   for c in 1:nc
-    if real(R[c, c]) < 0.0
-      R[c, c:end] *= -1 #only fip non-zero portion of the row.
-      Q[:, c] *= -1
+    sign_Rc = sign(R[c, c])
+    if !isone(sign_Rc)
+      R[c, c:end] *= conj(sign_Rc) #only fip non-zero portion of the row.
+      Q[:, c] *= sign_Rc
     end
   end
   return (Q, R)
@@ -431,9 +432,10 @@ function ql_positive(M::AbstractMatrix)
   nr, nc = size(L)
   dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr
   for c in 1:(nc - dc)
-    if c <= nr && real(L[c, c + dc]) < 0.0
-      L[c, 1:(c + dc)] *= -1 #only fip non-zero portion of the column.
-      Q[:, c] *= -1
+    sign_Lc = sign(L[c, c + dc])
+    if c <= nr && !isone(sign_Lc)
+      L[c, 1:(c + dc)] *= sign_Lc #only fip non-zero portion of the column.
+      Q[:, c] *= conj(sign_Lc)
     end
   end
   return (Q, L)

From 042578b974161c2c20bffdaa2148a400947d8de3 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Wed, 15 Mar 2023 10:24:22 -0600
Subject: [PATCH 62/90] Avoid type piracy in unit test code

---
 NDTensors/test/linearalgebra.jl | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index d530d56505..3a9a01f659 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -20,13 +20,11 @@ end
   @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14
 end
 
-Base.eps(::Type{Complex{T}}) where {T<:AbstractFloat} = eps(T)
-
 @testset "Dense $qx decomposition, elt=$elt, positve=$positive" for qx in [qr, ql],
   elt in [Float64, ComplexF64, Float32, ComplexF32],
   positive in [false, true]
 
-  eps = Base.eps(elt) * 30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps.
+  eps = Base.eps(real(elt)) * 30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps.
   n, m = 4, 8
   Id = Diagonal(fill(1.0, min(n, m)))
   #

From e6ff0e6ad72b92719e1b47b3f45eae33d1271d72 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Wed, 15 Mar 2023 10:41:09 -0600
Subject: [PATCH 63/90] Remove unnecessary usage of where {ElT}

---
 NDTensors/src/blocksparse/linearalgebra.jl | 8 ++++----
 NDTensors/src/linearalgebra.jl             | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl
index 22434db26f..0556674177 100644
--- a/NDTensors/src/blocksparse/linearalgebra.jl
+++ b/NDTensors/src/blocksparse/linearalgebra.jl
@@ -296,16 +296,16 @@ function LinearAlgebra.eigen(
   return D, V, Spectrum(d, truncerr)
 end
 
-ql(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(ql, T; kwargs...)
-qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(qr, T; kwargs...)
+ql(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...)
+qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...)
 #
 # Generic function to implelement blocks sparse qr/ql decomposition. It calls
 # the dense qr or ql for each block. The X tensor = R or L.
 # This code thanks to Niklas Tausendpfund
 # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb
 #
-function qx(qx::Function, T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT}
-
+function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...)
+  ElT=eltype(T)
   # getting total number of blocks
   nnzblocksT = nnzblocks(T)
   nzblocksT = nzblocks(T)
diff --git a/NDTensors/src/linearalgebra.jl b/NDTensors/src/linearalgebra.jl
index f50847be5c..8e164ad8d0 100644
--- a/NDTensors/src/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra.jl
@@ -367,11 +367,11 @@ function LinearAlgebra.eigen(
   return D, V, spec
 end
 
-function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT}
+function qr(T::DenseTensor{<:Any,2,IndsT}; positive=false, kwargs...) where {IndsT}
   qxf = positive ? qr_positive : qr
   return qx(qxf, T; kwargs...)
 end
-function ql(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT}
+function ql(T::DenseTensor{<:Any,2,IndsT}; positive=false, kwargs...) where {IndsT}
   qxf = positive ? ql_positive : ql
   return qx(qxf, T; kwargs...)
 end
@@ -380,7 +380,7 @@ end
 # Generic function for qr and ql decomposition of dense matrix.
 # The X tensor = R or L.
 #
-function qx(qx::Function, T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT}
+function qx(qx::Function, T::DenseTensor{<:Any,2,IndsT}; kwargs...) where {IndsT}
   QM, XM = qx(matrix(T))
   # Make the new indices to go onto Q and R
   q, r = inds(T)
   q = dim(q) < dim(r) ? sim(q) : sim(r)

From af7384402ae13a6af6474ef1bd86ec379a15d72a Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Wed, 15 Mar 2023 11:08:34 -0600
Subject: [PATCH 64/90] Pass tags as a keyword argument. And format.

---
 NDTensors/src/blocksparse/linearalgebra.jl    |  4 +--
 src/tensor_operations/matrix_decomposition.jl | 28 +++++++++----------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl
index 0556674177..0954bb3383 100644
--- a/NDTensors/src/blocksparse/linearalgebra.jl
+++ b/NDTensors/src/blocksparse/linearalgebra.jl
@@ -304,8 +304,8 @@ qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...)
 # This code thanks to Niklas Tausendpfund
 # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb
 #
-function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) 
-  ElT=eltype(T)
+function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...)
+  ElT = eltype(T)
   # getting total number of blocks
   nnzblocksT = nnzblocks(T)
   nzblocksT = nzblocks(T)
diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index 595da6c981..c0fd02943f 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -405,7 +405,7 @@ rq(A::ITensor; kwargs...) = error(noinds_error_message("rq"))
 lq(A::ITensor; kwargs...) = error(noinds_error_message("lq"))
 ql(A::ITensor; kwargs...) = error(noinds_error_message("ql"))
 #
-# Use supplied only left indices as a tuple or vector.
+# User supplied only left indices as a tuple or vector.
 #
 qr(A::ITensor, Linds::Indices; kwargs...) = qr(A, Linds, uniqueinds(A, Linds); kwargs...)
 ql(A::ITensor, Linds::Indices; kwargs...) = ql(A, Linds, uniqueinds(A, Linds); kwargs...)
@@ -419,25 +419,25 @@ ql(A::ITensor, Linds...; kwargs...) = ql(A, Linds, uniqueinds(A, Linds); kwargs.
 rq(A::ITensor, Linds...; kwargs...) = rq(A, Linds, uniqueinds(A, Linds); kwargs...)
 lq(A::ITensor, Linds...; kwargs...) = lq(A, Linds, uniqueinds(A, Linds); kwargs...)
# -# Core function where both left and right indices are supplied as tuples of vectors +# Core function where both left and right indices are supplied as tuples or vectors # Handle default tags and dispatch to generic qx/xq functions. # function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", kwargs...) - return qx(qr, tags, A, Linds, Rinds; kwargs...) + return qx(qr, A, Linds, Rinds; tags, kwargs...) end function ql(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,ql", kwargs...) - return qx(ql, tags, A, Linds, Rinds; kwargs...) + return qx(ql, A, Linds, Rinds; tags, kwargs...) end function rq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,rq", kwargs...) - return xq(ql, tags, A, Linds, Rinds; kwargs...) + return xq(ql, A, Linds, Rinds; tags, kwargs...) end function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", kwargs...) - return xq(qr, tags, A, Linds, Rinds; kwargs...) + return xq(qr, A, Linds, Rinds; tags, kwargs...) end # # Generic function implementing both qr and ql decomposition. The X tensor = R or L. # -function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) +function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) # Strip out any extra indices that are not in A. # Unit test test/base/test_itensor.jl line 1469 will fail without this. Linds = commoninds(A, Linds) @@ -470,9 +470,9 @@ function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwa # fix up the tag name for the index between Q and X. # q = commonind(Q, X) - Q = settags(Q, qtags, q) - X = settags(X, qtags, q) - q = settags(q, qtags) + Q = settags(Q, tags, q) + X = settags(X, tags, q) + q = settags(q, tags) return Q, X, q end @@ -481,14 +481,14 @@ end # Generic function implementing both rq and lq decomposition. Implemented using qr/ql # with swapping the left and right indices. The X tensor = R or L. # -function xq(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) +function xq(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) Q, X, q = qx(A, Rinds, Linds; kwargs...) # # fix up the tag name for the index between Q and L. # - Q = settags(Q, qtags, q) - X = settags(X, qtags, q) - q = settags(q, qtags) + Q = settags(Q, tags, q) + X = settags(X, tags, q) + q = settags(q, tags) return X, Q, q end From 6d3f6b3deb65d30f63c20e47540691d4e8f61a77 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 15 Mar 2023 12:14:07 -0600 Subject: [PATCH 65/90] Use new randomTensor(ElT,tuple) interface --- NDTensors/test/linearalgebra.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 3a9a01f659..dd7058f376 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -30,7 +30,7 @@ end # # Wide matrix (more columns than rows) # - A = randomTensor(elt, n, m) + A = randomTensor(elt, (n, m)) Q, X = qx(A; positive=positive) #X is R or L. 
@test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Id atol = eps @@ -45,7 +45,7 @@ end # # Tall matrix (more rows than cols) # - A = randomTensor(elt, m, n) #Tall array + A = randomTensor(elt, (m, n)) #Tall array Q, X = qx(A; positive=positive) @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Id atol = eps From 4a16f3d69921c7902e79f22028842b160cb963ac Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 15 Mar 2023 13:14:45 -0600 Subject: [PATCH 66/90] Eliminate where{IndsT} for dense qr/ql --- NDTensors/src/linearalgebra/linearalgebra.jl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 8e164ad8d0..2bb0262a82 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -367,11 +367,11 @@ function LinearAlgebra.eigen( return D, V, spec end -function qr(T::DenseTensor{<:Any,2,IndsT}; positive=false, kwargs...) where {IndsT} +function qr(T::DenseTensor{<:Any,2}; positive=false, kwargs...) qxf = positive ? qr_positive : qr return qx(qxf, T; kwargs...) end -function ql(T::DenseTensor{<:Any,2,IndsT}; positive=false, kwargs...) where {IndsT} +function ql(T::DenseTensor{<:Any,2}; positive=false, kwargs...) qxf = positive ? ql_positive : ql return qx(qxf, T; kwargs...) end @@ -380,11 +380,12 @@ end # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. # -function qx(qx::Function, T::DenseTensor{<:Any,2,IndsT}; kwargs...) where {IndsT} +function qx(qx::Function, T::DenseTensor{<:Any,2}; kwargs...) QM, XM = qx(matrix(T)) # Make the new indices to go onto Q and R q, r = inds(T) q = dim(q) < dim(r) ? sim(q) : sim(r) + IndsT = indstype(T) #get the index type Qinds = IndsT((ind(T, 1), q)) Xinds = IndsT((q, ind(T, 2))) Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided From 49df0be6798c293fe21a92112404d15655f1e5ec Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 16 Mar 2023 11:31:33 -0600 Subject: [PATCH 67/90] Merge remote-tracking branch 'origin/RQQLLQ' into QXRankReduction --- NDTensors/src/blocksparse/linearalgebra.jl | 8 +-- NDTensors/src/linearalgebra/linearalgebra.jl | 25 ++++---- NDTensors/test/linearalgebra.jl | 63 ++++++++++++------- src/tensor_operations/matrix_decomposition.jl | 28 ++++----- test/base/test_decomp.jl | 20 +++--- 5 files changed, 84 insertions(+), 60 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 22434db26f..0954bb3383 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -296,16 +296,16 @@ function LinearAlgebra.eigen( return D, V, Spectrum(d, truncerr) end -ql(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(ql, T; kwargs...) -qr(T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} = qx(qr, T; kwargs...) +ql(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...) +qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) # # Generic function to implelement blocks sparse qr/ql decomposition. It calls # the dense qr or ql for each block. The X tensor = R or L. # This code thanks to Niklas Tausendpfund # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qx(qx::Function, T::BlockSparseTensor{ElT,2}; kwargs...) where {ElT} - +function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) 
+ ElT = eltype(T) # getting total number of blocks nnzblocksT = nnzblocks(T) nzblocksT = nzblocks(T) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index a3ce0ef6cc..d35ebf49f3 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -405,11 +405,11 @@ function trim_rows( return R1, Q1 end -function qr(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} - qxf = positive ? qr_positive : qr +function qr(T::DenseTensor{<:Any,2,IndsT}; positive=false, kwargs...) where {IndsT} + qxf = positive ? qr_positive : qr return qx(qxf, T; kwargs...) end -function ql(T::DenseTensor{ElT,2,IndsT}; positive=false, kwargs...) where {ElT,IndsT} +function ql(T::DenseTensor{<:Any,2,IndsT}; positive=false, kwargs...) where {IndsT} qxf = positive ? ql_positive : ql return qx(qxf, T; kwargs...) end @@ -418,8 +418,9 @@ end # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. # -function qx(qx::Function, T::DenseTensor{ElT,2,IndsT}; rr_cutoff=-1.0, kwargs...) where {ElT,IndsT} - QM, XM = qx(matrix(T)) +function qx(qx::Function, T::DenseTensor{<:Any,2,IndsT}; rr_cutoff=-1.0, kwargs...) where {IndsT} + QM1, XM = qx(matrix(T)) + QM=convert(Matrix, QM1) # # Do row removal for rank revealing QR/QL # @@ -455,9 +456,10 @@ function qr_positive(M::AbstractMatrix) Q = convert(Matrix, sparseQ) nc = size(Q, 2) for c in 1:nc - if real(R[c, c]) < 0.0 - R[c, c:end] *= -1 #only fip non-zero portion of the row. - Q[:, c] *= -1 + sign_Rc = R[c, c]==0.0 ? 1.0 : sign(R[c, c]) + if !isone(sign_Rc) + R[c, c:end] *= conj(sign_Rc) #only fip non-zero portion of the row. + Q[:, c] *= sign_Rc end end return (Q, R) @@ -477,9 +479,10 @@ function ql_positive(M::AbstractMatrix) nr, nc = size(L) dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr for c in 1:(nc - dc) - if c <= nr && real(L[c, c + dc]) < 0.0 - L[c, 1:(c + dc)] *= -1 #only fip non-zero portion of the column. - Q[:, c] *= -1 + sign_Lc = L[c, c + dc]==0.0 ? 1.0 : sign(L[c, c + dc]) + if c <= nr && !isone(sign_Lc) + L[c, 1:(c + dc)] *= sign_Lc #only fip non-zero portion of the column. + Q[:, c] *= conj(sign_Lc) end end return (Q, L) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index cdd920dca1..2de9c90604 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -20,26 +20,47 @@ end @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14 end -@testset "Dense QR decomposition" begin - n, m = 4, 8 - nm = min(n, m) - A = randomTensor(n, m) - Q, R = qr(A) - @test A ≈ Q * R atol = 1e-13 - @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-13 -end -@testset "Dense LQ decomposition" begin +@testset "Dense $qx decomposition, elt=$elt, positve=$positive" for qx in [qr, ql], + elt in [Float64, ComplexF64, Float32, ComplexF32], + positive in [false, true] + + eps = Base.eps(real(elt)) * 30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps. n, m = 4, 8 - nm = min(n, m) - A = randomTensor(n, m) - Q, L = ql(A) - @test A ≈ Q * L atol = 1e-13 - @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, nm)) atol = 1e-13 + Id = Diagonal(fill(1.0, min(n, m))) + # + # Wide matrix (more columns than rows) + # + A = randomTensor(elt, (n, m)) + Q, X = qx(A; positive=positive) #X is R or L. 
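+  # For every tested element type, Q should reconstruct A with X and be left-orthonormal to within eps.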
+ @test A ≈ Q * X atol = eps + @test array(Q)' * array(Q) ≈ Id atol = eps + @test array(Q) * array(Q)' ≈ Id atol = eps + if positive + nr, nc = size(X) + dr = qx == ql ? Base.max(0, nc - nr) : 0 + diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. + @test all(real(diagX) .>= 0.0) + @test all(imag(diagX) .== 0.0) + end + # + # Tall matrix (more rows than cols) + # + A = randomTensor(elt, (m, n)) #Tall array + Q, X = qx(A; positive=positive) + @test A ≈ Q * X atol = eps + @test array(Q)' * array(Q) ≈ Id atol = eps + if positive + nr, nc = size(X) + dr = qx == ql ? Base.max(0, nc - nr) : 0 + diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. + @test all(real(diagX) .>= 0.0) + @test all(imag(diagX) .== 0.0) + end end @testset "Dense Rank revealing QR/RQ decomposition" begin n, m = 4, 8 - A = randomTensor(n, m) + A = randomTensor(Float64,(n, m)) # make some columns lineary dependent A[2, :] = A[1, :] * 1.1 A[4, :] = A[1, :] * 2.1 @@ -50,11 +71,11 @@ end nm = dim(Q, 2) @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-12 - R, Q = rq(A; rr_cutoff=1e-12) - @test dim(Q, 1) == n - 2 #make 2 rows actually got removed. - @test dim(R, 2) == n - 2 #make 2 columns actually got removed. - @test A ≈ R * Q atol = 1e-12 - nm = dim(Q, 1) - @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, nm)) atol = 1e-12 + Q, L = ql(A; rr_cutoff=1e-12) + @test dim(Q, 2) == n - 2 #make 2 rows actually got removed. + @test dim(L, 1) == n - 2 #make 2 columns actually got removed. + @test A ≈ Q * L atol = 1e-12 + nm = dim(Q, 2) + @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-12 end nothing diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 595da6c981..c0fd02943f 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -405,7 +405,7 @@ rq(A::ITensor; kwargs...) = error(noinds_error_message("rq")) lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) ql(A::ITensor; kwargs...) = error(noinds_error_message("ql")) # -# Use supplied only left indices as a tuple or vector. +# User supplied only left indices as a tuple or vector. # qr(A::ITensor, Linds::Indices; kwargs...) = qr(A, Linds, uniqueinds(A, Linds); kwargs...) ql(A::ITensor, Linds::Indices; kwargs...) = ql(A, Linds, uniqueinds(A, Linds); kwargs...) @@ -419,25 +419,25 @@ ql(A::ITensor, Linds...; kwargs...) = ql(A, Linds, uniqueinds(A, Linds); kwargs. rq(A::ITensor, Linds...; kwargs...) = rq(A, Linds, uniqueinds(A, Linds); kwargs...) lq(A::ITensor, Linds...; kwargs...) = lq(A, Linds, uniqueinds(A, Linds); kwargs...) # -# Core function where both left and right indices are supplied as tuples of vectors +# Core function where both left and right indices are supplied as tuples or vectors # Handle default tags and dispatch to generic qx/xq functions. # function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", kwargs...) - return qx(qr, tags, A, Linds, Rinds; kwargs...) + return qx(qr, A, Linds, Rinds; tags, kwargs...) end function ql(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,ql", kwargs...) - return qx(ql, tags, A, Linds, Rinds; kwargs...) + return qx(ql, A, Linds, Rinds; tags, kwargs...) end function rq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,rq", kwargs...) - return xq(ql, tags, A, Linds, Rinds; kwargs...) + return xq(ql, A, Linds, Rinds; tags, kwargs...) 
end function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", kwargs...) - return xq(qr, tags, A, Linds, Rinds; kwargs...) + return xq(qr, A, Linds, Rinds; tags, kwargs...) end # # Generic function implementing both qr and ql decomposition. The X tensor = R or L. # -function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) +function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) # Strip out any extra indices that are not in A. # Unit test test/base/test_itensor.jl line 1469 will fail without this. Linds = commoninds(A, Linds) @@ -470,9 +470,9 @@ function qx(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwa # fix up the tag name for the index between Q and X. # q = commonind(Q, X) - Q = settags(Q, qtags, q) - X = settags(X, qtags, q) - q = settags(q, qtags) + Q = settags(Q, tags, q) + X = settags(X, tags, q) + q = settags(q, tags) return Q, X, q end @@ -481,14 +481,14 @@ end # Generic function implementing both rq and lq decomposition. Implemented using qr/ql # with swapping the left and right indices. The X tensor = R or L. # -function xq(qx::Function, qtags, A::ITensor, Linds::Indices, Rinds::Indices; kwargs...) +function xq(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) Q, X, q = qx(A, Rinds, Linds; kwargs...) # # fix up the tag name for the index between Q and L. # - Q = settags(Q, qtags, q) - X = settags(X, qtags, q) - q = settags(q, qtags) + Q = settags(Q, tags, q) + X = settags(X, tags, q) + q = settags(q, tags) return X, Q, q end diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 2e089bc7a5..1c847bbbdf 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -174,13 +174,13 @@ end Ainds = inds(A) Linds = Ainds[1:ninds] Rinds = uniqueinds(A, Linds...) - Q, R, q = qr(A, Linds; tags="Link,qr1") #calling qr(A) triggers not supported error. + Q, R, q = qr(A, Linds) #calling qr(A) triggers not supported error. @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. @test length(inds(R)) == 3 - ninds + 1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test q == commonind(Q, R) - @test hastags(q, "qr1") + @test hastags(q, "Link,qr") if (length(inds(R)) > 1) @test is_upper(q, R) #specify the left index end @@ -260,7 +260,7 @@ end @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) + R, Q, q = rq(A, Ainds[1:ninds]) @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 4 - ninds + 1 @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R @@ -290,7 +290,7 @@ end # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - Q, L, q = ITensors.ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(L)) == 3 - ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -298,7 +298,7 @@ end @test A ≈ Q * L atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. 
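+  # Note the return order: rq and lq give (X, Q, q) with the triangular factor first, while qr and ql give (Q, X, q).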
@test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -306,7 +306,7 @@ end @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - L, Q, q = ITensors.lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. + L, Q, q = lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. @test length(inds(L)) == ninds + 1 #+1 to account for new rq,Link index. @test length(inds(Q)) == 3 - ninds + 1 @test flux(Q) == expected_Qflux[ninds + 1] @@ -358,16 +358,16 @@ end @test min(diag_upper(q, R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - Q, L, q = ITensors.ql(A, Ainds[1:ninds]; positive=true) + Q, L, q = ql(A, Ainds[1:ninds]; positive=true) @test min(diag_lower(q, L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - R, Q, q = ITensors.rq(A, Ainds[1:ninds]; positive=true) + R, Q, q = rq(A, Ainds[1:ninds]; positive=true) @test min(diag_lower(q, R)...) > 0.0 #transpose R is lower @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - L, Q, q = ITensors.lq(A, Ainds[1:ninds]; positive=true) + L, Q, q = lq(A, Ainds[1:ninds]; positive=true) @test min(diag_upper(q, L)...) > 0.0 #transpose L is upper @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @@ -381,7 +381,7 @@ end Q, R, q = qr(A, l, s, dag(s'); positive=true) @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 - Q, L, q = ITensors.ql(A, l, s, dag(s'); positive=true) + Q, L, q = ql(A, l, s, dag(s'); positive=true) @test min(diag(L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 end From e53c85bc65958ea67a214cb8e695d95acba65943 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 16 Mar 2023 12:25:06 -0600 Subject: [PATCH 68/90] Handle zero pivots gracefully with the new sign(diag) code. --- NDTensors/src/linearalgebra/linearalgebra.jl | 26 ++++++++++++-------- NDTensors/test/linearalgebra.jl | 17 +++++++++++-- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 2bb0262a82..1f004fc673 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -382,6 +382,8 @@ end # function qx(qx::Function, T::DenseTensor{<:Any,2}; kwargs...) QM, XM = qx(matrix(T)) + # Be aware that if positive==false, then typeof(QM)=LinearAlgebra.QRCompactWYQ, not Matrix + # It gets converted to matrix below. # Make the new indices to go onto Q and R q, r = inds(T) q = dim(q) < dim(r) ? sim(q) : sim(r) @@ -410,11 +412,13 @@ function qr_positive(M::AbstractMatrix) Q = convert(Matrix, sparseQ) nc = size(Q, 2) for c in 1:nc - sign_Rc = sign(R[c, c]) - if !isone(sign_Rc) - R[c, c:end] *= conj(sign_Rc) #only fip non-zero portion of the row. - Q[:, c] *= sign_Rc - end + if R[c, c]!=0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. + sign_Rc = sign(R[c, c]) + if !isone(sign_Rc) + R[c, c:end] *= conj(sign_Rc) #only fip non-zero portion of the row. + Q[:, c] *= sign_Rc + end + end end return (Q, R) end @@ -433,11 +437,13 @@ function ql_positive(M::AbstractMatrix) nr, nc = size(L) dc = nc > nr ? 
nc - nr : 0 #diag is shifted over by dc if nc>nr for c in 1:(nc - dc) - sign_Lc = sign(L[c, c + dc]) - if c <= nr && !isone(sign_Lc) - L[c, 1:(c + dc)] *= sign_Lc #only fip non-zero portion of the column. - Q[:, c] *= conj(sign_Lc) - end + if L[c, c + dc]!=0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. + sign_Lc = sign(L[c, c + dc]) + if c <= nr && !isone(sign_Lc) + L[c, 1:(c + dc)] *= sign_Lc #only fip non-zero portion of the column. + Q[:, c] *= conj(sign_Lc) + end + end end return (Q, L) end diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index dd7058f376..24ca696459 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -20,9 +20,10 @@ end @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14 end -@testset "Dense $qx decomposition, elt=$elt, positve=$positive" for qx in [qr, ql], +@testset "Dense $qx decomposition, elt=$elt, positve=$positive, singular=$singular" for qx in [qr, ql], elt in [Float64, ComplexF64, Float32, ComplexF32], - positive in [false, true] + positive in [false, true], + singular in [false, true] eps = Base.eps(real(elt)) * 30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps. n, m = 4, 8 @@ -31,6 +32,12 @@ end # Wide matrix (more columns than rows) # A = randomTensor(elt, (n, m)) + # We want to test 0.0 on the diagonal. We need make all roaw equal to gaurantee this with numerical roundoff. + if singular + for i in 2:n + A[i,:]=A[1,:] + end + end Q, X = qx(A; positive=positive) #X is R or L. @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Id atol = eps @@ -46,6 +53,12 @@ end # Tall matrix (more rows than cols) # A = randomTensor(elt, (m, n)) #Tall array + # We want to test 0.0 on the diagonal. We need make all rows equal to gaurantee this with numerical roundoff. + if singular + for i in 2:m + A[i,:]=A[1,:] + end + end Q, X = qx(A; positive=positive) @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Id atol = eps From 04cccd7c00baa272dd6c9d83696e9da64bfc3f63 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 16 Mar 2023 12:39:26 -0600 Subject: [PATCH 69/90] Remove where {T} for low level ql routine. --- NDTensors/src/linearalgebra/linearalgebra.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 1f004fc673..3b396901c5 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -452,8 +452,9 @@ end # Lapack replaces A with Q & R carefully packed together. So here we just copy a # before letting lapack overwirte it. # -function ql(A::AbstractMatrix{T}; kwargs...) where {T} +function ql(A::AbstractMatrix; kwargs...) Base.require_one_based_indexing(A) + T=eltype(A) AA = similar(A, LinearAlgebra._qreltype(T), size(A)) copyto!(AA, A) return ql!(AA; kwargs...) 
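For reference, the reason for the copy kept by this wrapper: ql! factors in place, so
the out-of-place ql must hand LAPACK a scratch matrix. A quick check of that contract
(a sketch only; it assumes the internal matrix-level routine is reachable as NDTensors.ql):

using NDTensors: ql   # assumed import path for the internal routine
A = rand(4, 4)
B = copy(A)
Q, L = ql(A)          # LAPACK overwrites the internal copy AA, not A itself
@assert A == B
@assert convert(Matrix, Q) * L ≈ A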
From b22f8a35015da64eff277603c2a1ab30c5ed0aa2 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Thu, 16 Mar 2023 12:40:31 -0600 Subject: [PATCH 70/90] Format --- NDTensors/src/linearalgebra/linearalgebra.jl | 12 ++++++------ NDTensors/test/linearalgebra.jl | 9 ++++++--- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 3b396901c5..2a62a0a7bd 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -412,13 +412,13 @@ function qr_positive(M::AbstractMatrix) Q = convert(Matrix, sparseQ) nc = size(Q, 2) for c in 1:nc - if R[c, c]!=0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. + if R[c, c] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. sign_Rc = sign(R[c, c]) if !isone(sign_Rc) R[c, c:end] *= conj(sign_Rc) #only fip non-zero portion of the row. Q[:, c] *= sign_Rc end - end + end end return (Q, R) end @@ -437,13 +437,13 @@ function ql_positive(M::AbstractMatrix) nr, nc = size(L) dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr for c in 1:(nc - dc) - if L[c, c + dc]!=0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. + if L[c, c + dc] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. sign_Lc = sign(L[c, c + dc]) if c <= nr && !isone(sign_Lc) L[c, 1:(c + dc)] *= sign_Lc #only fip non-zero portion of the column. Q[:, c] *= conj(sign_Lc) end - end + end end return (Q, L) end @@ -452,9 +452,9 @@ end # Lapack replaces A with Q & R carefully packed together. So here we just copy a # before letting lapack overwirte it. # -function ql(A::AbstractMatrix; kwargs...) +function ql(A::AbstractMatrix; kwargs...) Base.require_one_based_indexing(A) - T=eltype(A) + T = eltype(A) AA = similar(A, LinearAlgebra._qreltype(T), size(A)) copyto!(AA, A) return ql!(AA; kwargs...) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 24ca696459..3a5a8902c7 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -20,7 +20,10 @@ end @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14 end -@testset "Dense $qx decomposition, elt=$elt, positve=$positive, singular=$singular" for qx in [qr, ql], +@testset "Dense $qx decomposition, elt=$elt, positve=$positive, singular=$singular" for qx in + [ + qr, ql + ], elt in [Float64, ComplexF64, Float32, ComplexF32], positive in [false, true], singular in [false, true] @@ -35,7 +38,7 @@ end # We want to test 0.0 on the diagonal. We need make all roaw equal to gaurantee this with numerical roundoff. if singular for i in 2:n - A[i,:]=A[1,:] + A[i, :] = A[1, :] end end Q, X = qx(A; positive=positive) #X is R or L. @@ -56,7 +59,7 @@ end # We want to test 0.0 on the diagonal. We need make all rows equal to gaurantee this with numerical roundoff. if singular for i in 2:m - A[i,:]=A[1,:] + A[i, :] = A[1, :] end end Q, X = qx(A; positive=positive) From 049207127a90eae6451f2b341a7b074153446442 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Mon, 20 Mar 2023 12:06:53 -0600 Subject: [PATCH 71/90] Clean up and comment code. 
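Behaviour is unchanged by the cleanup. At the ITensor level the rank-revealing path
still works as in this usage sketch (index names are illustrative; rr_cutoff is the
keyword introduced earlier in this series):

using ITensors
i, j = Index(4, "i"), Index(4, "j")
A = randomITensor(i) * randomITensor(j)  # outer product, so A has matrix rank 1
Q, R, q = qr(A, i; rr_cutoff=1e-12)      # trim_rows strips the numerically zero rows of R
@assert dim(q) == 1                      # the new link index collapses to the true rank
@assert Q * R ≈ A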
---
 NDTensors/src/linearalgebra/linearalgebra.jl | 75 ++++++++++----------
 NDTensors/test/linearalgebra.jl              | 53 ++++++--------
 2 files changed, 61 insertions(+), 67 deletions(-)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index 1a6d6a64df..16e9c71cc4 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -367,42 +367,45 @@ function LinearAlgebra.eigen(
   return D, V, spec
 end
 #
-# QR rank reduction helpers
-#
-function find_zero_rows(R::AbstractMatrix, rr_cutoff::Float64)::Array{Bool}
-  nr, nc = size(R)
-  return map((r) -> (maximum(abs.(R[r, 1:nc])) <= rr_cutoff), 1:nr)
-end
-
-#
-# Trim out zero rows of R within tolerance rr_cutoff. Also trim the corresponding columns
-# of Q.
+# Trim out zero rows of R/X within tolerance rr_cutoff. Also trim the corresponding columns
+# of Q. X = R or L
 #
 function trim_rows(
-  R::AbstractMatrix, Q::AbstractMatrix, rr_cutoff::Float64
-) where {ElT,IndsT}
-  zeros = find_zero_rows(R, rr_cutoff)
+  Q::AbstractMatrix, X::AbstractMatrix, rr_cutoff::Float64
+)
+  #
+  # Find and count the zero rows.  Bail out if there are none.
+  #
+  Xnr, Xnc = size(X)
+  Qnr, Qnc = size(Q)
+  @assert Xnr==Qnc #Sanity check.
+  zeros = map((r) -> (maximum(abs.(X[r, 1:Xnc])) <= rr_cutoff), 1:Xnr)
   num_zero_rows = sum(zeros)
   if num_zero_rows == 0
-    return R, Q
+    return Q, X
   end
-  #println("Rank Reveal removing $num_zero_rows rows with log10(rr_cutoff)=$(log10(rr_cutoff))")
-  Rnr, Rnc = size(R)
-  Qnr, Qnc = size(Q)
-  #@assert Rnr==Qnc  Q is strided so we can't assume this
-  R1nr = Rnr - num_zero_rows
-  T = eltype(R)
-  R1 = Matrix{T}(undef, R1nr, Rnc)
-  Q1 = Matrix{T}(undef, Qnr, R1nr)
-  r1 = 1
-  for r in 1:Rnr
+  #
+  # Debug info
+  # println("Rank Reveal removing $num_zero_rows rows with log10(rr_cutoff)=$(log10(rr_cutoff))")
+  #
+  # Create new Q & X matrices with reduced size.
+  #
+  X1nr = Xnr - num_zero_rows #new dim between Q & X
+  T = eltype(X)
+  X1 = Matrix{T}(undef, X1nr, Xnc)
+  Q1 = Matrix{T}(undef, Qnr, X1nr)
+  #
+  # Transfer non-zero rows of X and corresponding columns of Q.
+  #
+  r1 = 1 #Row/col counter in new reduced Q & X
+  for r in 1:Xnr
     if zeros[r] == false
-      R1[r1, :] = R[r, :] #transfer row
+      X1[r1, :] = X[r, :] #transfer row
       Q1[:, r1] = Q[:, r] #transfer column
       r1 += 1 #next row in rank reduced matrices.
     end #if zero
   end #for r
-  return R1, Q1
+  return Q1, X1
 end

 function qr(T::DenseTensor{<:Any,2}; positive=false, kwargs...)
@@ -421,27 +424,25 @@ end
 #
 function qx(qx::Function, T::DenseTensor{<:Any,2}; rr_cutoff=-1.0, kwargs...)
   QM1, XM = qx(matrix(T))
-  QM=convert(Matrix, QM1)
-  # Be aware that if positive==false, then typeof(QM)=LinearAlgebra.QRCompactWYQ, not Matrix
-  # It gets converted to matrix below.
+  # When qx=qr typeof(QM1)==LinearAlgebra.QRCompactWYQ
+  # When qx=ql typeof(QM1)==Matrix and this should be a no-op
+  QM=Matrix(QM1)
   #
   # Do row removal for rank revealing QR/QL
   #
   if rr_cutoff >= 0.0
-    XM, QM = trim_rows(XM, QM, rr_cutoff)
+    QM, XM = trim_rows(QM, XM, rr_cutoff)
   end
   #
   # Make the new indices to go onto Q and X
   #
-  IndsT = indstype(T) #get the index type
-  IndexT = IndsT.parameters[1] #establish the index type.
+  IndsT = indstype(T) #get the indices type
+  @assert IndsT.parameters[1]==IndsT.parameters[2] #they better be the same!
+  IndexT = IndsT.parameters[1] #establish the single index type.
   q = IndexT(size(XM)[1]) #create the Q--X link index.
-  # q, r = inds(T)
-  # q = dim(q) < dim(r) ? sim(q) : sim(r)
-  IndsT = indstype(T) #get the index type
   Qinds = IndsT((ind(T, 1), q))
   Xinds = IndsT((q, ind(T, 2)))
-  Q = tensor(Dense(vec(Matrix(QM))), Qinds) #Q was strided
+  Q = tensor(Dense(vec(QM)), Qinds)
   X = tensor(Dense(vec(XM)), Xinds)
   return Q, X
 end
@@ -500,7 +501,7 @@ function ql_positive(M::AbstractMatrix)
 end

 #
-# Lapack replaces A with Q & R carefully packed together. So here we just copy A
+# Lapack replaces A with Q & L carefully packed together. So here we just copy A
 # before letting lapack overwrite it.
 #
 function ql(A::AbstractMatrix; kwargs...)
diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index 6682b3088a..a516701e6e 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -20,31 +20,35 @@ end
   @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14
 end
 
-@testset "Dense $qx decomposition, elt=$elt, positive=$positive, singular=$singular" for qx in
+@testset "Dense $qx decomposition, elt=$elt, positive=$positive, singular=$singular, rank_reveal=$rank_reveal" for qx in
   [
     qr, ql
   ],
   elt in [Float64, ComplexF64, Float32, ComplexF32],
   positive in [false, true],
-  singular in [false, true]
+  singular in [false, true],
+  rank_reveal in [false,true],
 
   eps = Base.eps(real(elt)) * 30 #this is set rather tight, so if you increase/change m,n you may have to open up the tolerance on eps.
+  rr_cutoff = rank_reveal ? eps*1.0 : -1.0
   n, m = 4, 8
-  Id = Diagonal(fill(1.0, min(n, m)))
   #
   # Wide matrix (more columns than rows)
   #
   A = randomTensor(elt, (n, m))
-  # We want to test 0.0 on the diagonal. We need to make all rows equal to guarantee this with numerical roundoff.
+  # We want to test 0.0 on the diagonal. We need to make all rows linearly dependent to
+  # guarantee this with numerical roundoff.
   if singular
     for i in 2:n
-      A[i, :] = A[1, :]
+      A[i, :] = A[1, :]*1.05^n
     end
   end
-  Q, X = qx(A; positive=positive) #X is R or L.
+  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff) #X is R or L.
   @test A ≈ Q * X atol = eps
-  @test array(Q)' * array(Q) ≈ Id atol = eps
-  @test array(Q) * array(Q)' ≈ Id atol = eps
+  @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
+  if dim(Q, 1)==dim(Q, 2)
+    @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, min(n, m))) atol = eps
+  end
   if positive
     nr, nc = size(X)
     dr = qx == ql ? Base.max(0, nc - nr) : 0
@@ -52,6 +56,10 @@ end
     @test all(real(diagX) .>= 0.0)
     @test all(imag(diagX) .== 0.0)
   end
+  if rr_cutoff>0 && singular
+    @test dim(Q, 2)==1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
+    @test dim(X ,1)==1 #Redundant?
+  end
   #
   # Tall matrix (more rows than cols)
   #
@@ -62,9 +70,10 @@ end
       A[i, :] = A[1, :]
     end
   end
-  Q, X = qx(A; positive=positive)
+  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff)
   @test A ≈ Q * X atol = eps
-  @test array(Q)' * array(Q) ≈ Id atol = eps
+  @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
+  #@test array(Q) * array(Q)'  no such relationship for tall matrices.
   if positive
     nr, nc = size(X)
     dr = qx == ql ? Base.max(0, nc - nr) : 0
@@ -72,26 +81,10 @@ end
     @test all(real(diagX) .>= 0.0)
     @test all(imag(diagX) .== 0.0)
   end
+  if rr_cutoff>0 && singular
+    @test dim(Q, 2)==1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
+    @test dim(X ,1)==1 #Redundant?
+  end
 end

-@testset "Dense Rank revealing QR/RQ decomposition" begin
-  n, m = 4, 8
-  A = randomTensor(Float64,(n, m))
-  # make some columns linearly dependent
-  A[2, :] = A[1, :] * 1.1
-  A[4, :] = A[1, :] * 2.1
-  Q, R = qr(A; rr_cutoff=1e-12)
-  @test dim(Q, 2) == n - 2 #make 2 columns actually got removed.
-  @test dim(R, 1) == n - 2 #make 2 rows actually got removed.
-  @test A ≈ Q * R atol = 1e-12
-  nm = dim(Q, 2)
-  @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-12
-
-  Q, L = ql(A; rr_cutoff=1e-12)
-  @test dim(Q, 2) == n - 2 #make 2 rows actually got removed.
-  @test dim(L, 1) == n - 2 #make 2 columns actually got removed.
-  @test A ≈ Q * L atol = 1e-12
-  nm = dim(Q, 2)
-  @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, nm)) atol = 1e-12
-end
 nothing
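The row-trimming logic of PATCH 71 is easiest to see on plain matrices: after a QR of a rank-deficient A, any row of R that is zero to within a tolerance contributes nothing to the product Q * R, so that row and the matching column of Q can be dropped. A self-contained sketch of the idea (not the NDTensors code itself, which carries tensor indices along):

using LinearAlgebra

A = [1.0 2.0; 2.0 4.0; 3.0 6.0] # rank 1: column 2 is 2 * column 1
F = qr(A)
Q, R = Matrix(F.Q), F.R # here R[2, :] is zero up to roundoff
keep = [maximum(abs.(R[r, :])) > 1e-14 for r in 1:size(R, 1)]
Q, R = Q[:, keep], R[keep, :] # drop zero rows of R and matching columns of Q
@assert A ≈ Q * R
@assert size(R, 1) == 1 # rank 1 was detected

Without pivoting this relies on the dependent directions producing numerically zero rows in place; the column pivoting added later in the series (PATCH 78) pushes negligible rows to the bottom of R and makes the detection robust.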
From 62ed9cc5d0acc83905323d528e932b6d0b0a4d91 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Mon, 20 Mar 2023 12:29:44 -0600
Subject: [PATCH 72/90] Format and add rr-verbose flag for optional rank
 reduction output.

---
 NDTensors/src/linearalgebra/linearalgebra.jl | 25 +++++++++++-------
 NDTensors/test/linearalgebra.jl              | 32 +++++++++++---------
 test/base/test_decomp.jl                     |  4 +--
 3 files changed, 34 insertions(+), 27 deletions(-)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index 16e9c71cc4..63956ca7d5 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -371,22 +371,27 @@ end
 # of Q. X = R or L
 #
 function trim_rows(
-  Q::AbstractMatrix, X::AbstractMatrix, rr_cutoff::Float64
-)
+  Q::AbstractMatrix, X::AbstractMatrix, rr_cutoff::Float64; rr_verbose=false, kwargs...
+)
   #
   # Find and count the zero rows.  Bail out if there are none.
   #
   Xnr, Xnc = size(X)
   Qnr, Qnc = size(Q)
-  @assert Xnr==Qnc #Sanity check.
+  @assert Xnr == Qnc #Sanity check.
   zeros = map((r) -> (maximum(abs.(X[r, 1:Xnc])) <= rr_cutoff), 1:Xnr)
   num_zero_rows = sum(zeros)
   if num_zero_rows == 0
     return Q, X
   end
   #
-  # Debug info
-  # println("Rank Reveal removing $num_zero_rows rows with log10(rr_cutoff)=$(log10(rr_cutoff))")
+  # Useful output for troubleshooting.
+  #
+  if rr_verbose
+    println(
+      "Rank Reveal removing $num_zero_rows rows with log10(rr_cutoff)=$(log10(rr_cutoff))"
+    )
+  end
   #
   # Create new Q & X matrices with reduced size.
   #
@@ -426,23 +431,23 @@ function qx(qx::Function, T::DenseTensor{<:Any,2}; rr_cutoff=-1.0, kwargs...)
   # When qx=qr typeof(QM1)==LinearAlgebra.QRCompactWYQ
   # When qx=ql typeof(QM1)==Matrix and this should be a no-op
-  QM=Matrix(QM1)
+  QM = Matrix(QM1)
   #
-  # Do row removal for rank revealing QR/QL
+  # Do row removal for rank revealing QR/QL. Probably not worth it to eliminate the if statement
   #
   if rr_cutoff >= 0.0
-    QM, XM = trim_rows(QM, XM, rr_cutoff)
+    QM, XM = trim_rows(QM, XM, rr_cutoff; kwargs...)
   end
   #
   # Make the new indices to go onto Q and X
   #
   IndsT = indstype(T) #get the indices type
-  @assert IndsT.parameters[1]==IndsT.parameters[2] #they better be the same!
+  @assert IndsT.parameters[1] == IndsT.parameters[2] #they better be the same!
   IndexT = IndsT.parameters[1] #establish the single index type.
   q = IndexT(size(XM)[1]) #create the Q--X link index.
   Qinds = IndsT((ind(T, 1), q))
   Xinds = IndsT((q, ind(T, 2)))
-  Q = tensor(Dense(vec(QM)), Qinds)
+  Q = tensor(Dense(vec(QM)), Qinds)
   X = tensor(Dense(vec(XM)), Xinds)
   return Q, X
 end
diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index a516701e6e..8325025d59 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -21,16 +21,17 @@ end
 end
 
 @testset "Dense $qx decomposition, elt=$elt, positive=$positive, singular=$singular, rank_reveal=$rank_reveal" for qx in
-  [
+  [
     qr, ql
   ],
   elt in [Float64, ComplexF64, Float32, ComplexF32],
   positive in [false, true],
   singular in [false, true],
-  rank_reveal in [false,true],
+  rank_reveal in [false, true],
 
-  eps = Base.eps(real(elt)) * 30 #this is set rather tight, so if you increase/change m,n you may have to open up the tolerance on eps.
-  rr_cutoff = rank_reveal ? eps*1.0 : -1.0
+  eps in Base.eps(real(elt)) * 30
+  #this is set rather tight, so if you increase/change m,n you may have to open up the tolerance on eps.
+  rr_cutoff = rank_reveal ? eps * 1.0 : -1.0
   n, m = 4, 8
   #
   # Wide matrix (more columns than rows)
@@ -40,13 +41,14 @@ end
   # guarantee this with numerical roundoff.
   if singular
     for i in 2:n
-      A[i, :] = A[1, :]*1.05^n
+      A[i, :] = A[1, :] * 1.05^n
     end
   end
-  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff) #X is R or L.
+  # you can set rr_verbose=true if you want to get debug output on rank reduction.
+  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff, rr_verbose=false) #X is R or L.
   @test A ≈ Q * X atol = eps
   @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
-  if dim(Q, 1)==dim(Q, 2)
+  if dim(Q, 1) == dim(Q, 2)
     @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, min(n, m))) atol = eps
   end
   if positive
@@ -56,10 +58,10 @@ end
     @test all(real(diagX) .>= 0.0)
     @test all(imag(diagX) .== 0.0)
   end
-  if rr_cutoff>0 && singular
-    @test dim(Q, 2)==1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
-    @test dim(X ,1)==1 #Redundant?
-  end
+  if rr_cutoff > 0 && singular
+    @test dim(Q, 2) == 1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
+    @test dim(X, 1) == 1 #Redundant?
+  end
   #
   # Tall matrix (more rows than cols)
   #
@@ -70,7 +72,7 @@ end
       A[i, :] = A[1, :]
     end
   end
-  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff)
+  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff, rr_verbose=false)
   @test A ≈ Q * X atol = eps
   @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
   #@test array(Q) * array(Q)'  no such relationship for tall matrices.
@@ -81,9 +83,9 @@ end
     @test all(real(diagX) .>= 0.0)
     @test all(imag(diagX) .== 0.0)
   end
-  if rr_cutoff>0 && singular
-    @test dim(Q, 2)==1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
-    @test dim(X ,1)==1 #Redundant?
+  if rr_cutoff > 0 && singular
+    @test dim(Q, 2) == 1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
+    @test dim(X, 1) == 1 #Redundant?
   end
 end
 
diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl
index 1c847bbbdf..ff9c0ff901 100644
--- a/test/base/test_decomp.jl
+++ b/test/base/test_decomp.jl
@@ -387,7 +387,7 @@ end
   end
 
   @testset "Rank revealing QR/RQ/QL/LQ decomp on MPS dense $elt tensor" for ninds in
-    [1,2,3],
+    [1, 2, 3],
     elt in [Float64, ComplexF64]
 
     l = Index(5, "l")
@@ -397,7 +397,7 @@ end
     Ainds = inds(A)
    A = rank_fix(A, Ainds[1:ninds]) #make all columns linearly dependent on column 1, so rank==1.
-    Q, R, q = qr(A, Ainds[1:ninds]; rr_cutoff=1e-12)
+    Q, R, q = qr(A, Ainds[1:ninds]; rr_cutoff=1e-12)
     @test dim(q) == 1 #check that we found rank==1
     @test A ≈ Q * R atol = 1e-13
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
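With PATCH 72 applied, rank reduction can be requested from the ITensor-level qr and monitored through the rr_verbose flag, along the lines of the updated tests. A usage sketch (these keyword names are renamed to cutoff and verbose later in the series, in PATCH 76):

using ITensors

l, s, r = Index(5, "l"), Index(2, "s"), Index(5, "r")
A = randomITensor(l, s, r)
# rr_verbose=true prints how many rows were trimmed, if any.
Q, R, q = qr(A, (l, s); rr_cutoff=1e-12, rr_verbose=false)
@assert A ≈ Q * R
@assert dim(q) == 5 # A is full rank here, so nothing gets trimmed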
From 0ec04b7b575adf827bd18c279ab51863833b059d Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Tue, 21 Mar 2023 08:04:34 -0600
Subject: [PATCH 73/90] Remove the old test/decomp.jl file

Somehow it crept back in after merging the reorg.

---
 test/decomp.jl | 525 -------------------------------------------------
 1 file changed, 525 deletions(-)
 delete mode 100644 test/decomp.jl

diff --git a/test/decomp.jl b/test/decomp.jl
deleted file mode 100644
index e25d5749fc..0000000000
--- a/test/decomp.jl
+++ /dev/null
@@ -1,525 +0,0 @@
-using ITensors, NDTensors, LinearAlgebra, Test
-
-#
-# Decide if rank 2 tensor is upper triangular, i.e. all zeros below the diagonal.
-#
-function is_upper(At::Tensor)::Bool
-  nr, nc = dims(At)
-  dc = Base.max(0, dim(nr) - dim(nc)) #column offset for rectangular matrices.
-  for i in CartesianIndices(At)
-    if i[1] > i[2] + dc
-      if abs(At[i]) > 0.0 #row>col is lower triangle
-        return false
-      end
-    end
-  end
-  return true
-end
-
-#
-# A must be rank 2
-#
-function is_upper(l::Index, A::ITensor, r::Index)::Bool
-  @assert length(inds(A)) == 2
-  if inds(A) != IndexSet(l, r)
-    A = permute(A, l, r)
-  end
-  return is_upper(NDTensors.tensor(A))
-end
-
-#
-# With left index specified
-#
-function is_upper(l::Index, A::ITensor)::Bool
-  other = noncommoninds(A, l)
-  if (length(other) == 1)
-    return is_upper(l, A, other[1])
-  else
-    # use combiner to gather all the "other" indices into one.
-    C = combiner(other...)
-    AC = A * C
-    return is_upper(l, AC, combinedind(C))
-  end
-end
-is_lower(l::Index, A::ITensor)::Bool = is_upper(A, l)
-
-#
-# With right index specified
-#
-function is_upper(A::ITensor, r::Index)::Bool
-  other = noncommoninds(A, r)
-  if (length(other) == 1)
-    return is_upper(other[1], A, r)
-  else
-    C = combiner(other...)
-    AC = A * C
-    return is_upper(combinedind(C), AC, r)
-  end
-end
-is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A)
-
-#
-# Makes all columns linearly dependent but scaled differently.
-#
-function rank_fix(A::ITensor, Linds...)
-  Lis = commoninds(A, (Linds...))
-  Ris = uniqueinds(A, Lis)
-  #
-  # Use combiners to render A down to a rank 2 tensor ready for the matrix QR routine.
-  #
-  CL, CR = combiner(Lis...), combiner(Ris...)
-  cL, cR = combinedind(CL), combinedind(CR)
-  AC = A * CR * CL
-  if inds(AC) != IndexSet(cL, cR)
-    AC = permute(AC, cL, cR)
-  end
-  At = tensor(AC)
-  nc = dim(At, 2)
-  @assert nc >= 2
-  for c in 2:nc
-    At[:, c] = At[:, 1] * 1.05^c
-  end
-  return itensor(At) * dag(CL) * dag(CR)
-end
-
-#
-# verify all QN directions were preserved for A=Q*R decompositions.
-#
-function test_directions(A::ITensor, Q::ITensor, R::ITensor, q::Index)
-  for i in noncommoninds(Q, q)
-    @test dir(findinds(A; tags=tags(i), plev=plev(i))[1]) == dir(i)
-  end
-  for i in noncommoninds(R, q)
-    @test dir(findinds(A; tags=tags(i), plev=plev(i))[1]) == dir(i)
-  end
-end
-
-@testset "ITensor Decompositions" begin
-  @testset "truncate!" begin
-    a = [0.1, 0.01, 1e-13]
-    @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) ==
-      (1e-13, (0.01 + 1e-13) / 2)
-    @test length(a) == 2
-
-    # Negative definite spectrum treated by taking
-    # square (if singular values) or absolute values
-    a = [-0.12, -0.1]
-    @test NDTensors.truncate!(a) == (0.0, 0.0)
-    @test length(a) == 2
-
-    a = [-0.1, -0.01, -1e-13]
-    @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) ==
-      (1e-13, (0.01 + 1e-13) / 2)
-    @test length(a) == 2
-  end
-
-  @testset "factorize" begin
-    i = Index(2, "i")
-    j = Index(2, "j")
-    A = randomITensor(i, j)
-    @test_throws ErrorException factorize(A, i; dir="left")
-    @test_throws ErrorException factorize(A, i; ortho="fakedir")
-  end
-
-  @testset "factorize with eigen_perturbation" begin
-    l = Index(4, "l")
-    s1 = Index(2, "s1")
-    s2 = Index(2, "s2")
-    r = Index(4, "r")
-
-    phi = randomITensor(l, s1, s2, r)
-
-    drho = randomITensor(l', s1', l, s1)
-    drho += swapprime(drho, 0, 1)
-    drho .*= 1E-5
-
-    U, B = factorize(phi, (l, s1); ortho="left", eigen_perturbation=drho)
-    @test norm(U * B - phi) < 1E-5
-
-    # Not allowed to use eigen_perturbation with which_decomp
-    # other than "automatic" or "eigen":
-    @test_throws ErrorException factorize(
-      phi, (l, s1); ortho="left", eigen_perturbation=drho, which_decomp="svd"
-    )
-  end
-
-  # Julia 1.6 makes it very difficult to split the exceedingly long line of code.
-  @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in
-    [
-      0, 1, 2, 3
-    ],
-    elt in [Float64, ComplexF64]
-
-    l = Index(5, "l")
-    s = Index(2, "s")
-    r = Index(5, "r")
-    A = randomITensor(elt, l, s, r)
-    Ainds = inds(A)
-    Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
-    @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
-    @test length(inds(R)) == 3 - ninds + 1
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    @test q == commonind(Q, R)
-    @test hastags(q, "qr")
-    if (length(inds(R)) > 1)
-      @test is_upper(q, R) #specify the left index
-    end
-
-    #Julia 1.6 seems to be very erratic about seeing exported symbols like rq.
-    R, Q, q = rq(A, Ainds[1:ninds])
-    @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
-    @test length(inds(Q)) == 3 - ninds + 1
-    @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    @test q == commonind(Q, R)
-    @test hastags(q, "rq")
-    if (length(inds(R)) > 1)
-      @test is_upper(R, q) #specify the right index
-    end
-
-    L, Q, q = lq(A, Ainds[1:ninds])
-    @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index.
-    @test length(inds(Q)) == 3 - ninds + 1
-    @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    @test q == commonind(Q, L)
-    @test hastags(q, "lq")
-    if (length(inds(L)) > 1)
-      @test is_lower(L, q) #specify the right index
-    end
-
-    Q, L, q = ql(A, Ainds[1:ninds])
-    @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index.
-    @test length(inds(L)) == 3 - ninds + 1
-    @test A ≈ Q * L atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    @test q == commonind(Q, L)
-    @test hastags(q, "ql")
-    if (length(inds(L)) > 1)
-      @test is_lower(q, L) #specify the right index
-    end
-  end
-
-  # Julia 1.6 makes it very difficult to split the exceedingly long line of code.
-  @testset "Rank revealing QR/RQ/QL/LQ decomp on MPS dense $elt tensor" for ninds in
-    [1, 2, 3],
-    elt in [Float64, ComplexF64]
-
-    l = Index(5, "l")
-    s = Index(2, "s")
-    r = Index(5, "r")
-    A = randomITensor(elt, l, s, s', r)
-
-    Ainds = inds(A)
-    A = rank_fix(A, Ainds[1:ninds]) #make all columns linearly dependent on column 1, so rank==1.
-    Q, R, q = qr(A, Ainds[1:ninds]; rr_cutoff=1e-12) #calling qr(A) triggers not supported error.
-    @test dim(q) == 1 #check that we found rank==1
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
-    R, Q, q = rq(A, Ainds[1:ninds]; rr_cutoff=1e-12)
-    @test dim(q) == 1 #check that we found rank==1
-    @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
-    L, Q, q = lq(A, Ainds[1:ninds]; rr_cutoff=1e-12)
-    @test dim(q) == 1 #check that we found rank==1
-    @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
-    Q, L, q = ql(A, Ainds[1:ninds]; rr_cutoff=1e-12)
-    @test dim(q) == 1 #check that we found rank==1
-    @test A ≈ Q * L atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-  end
-
-  @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [
-    0, 1, 2, 3, 4
-  ]
-    l = Index(5, "l")
-    s = Index(2, "s")
-    r = Index(10, "r")
-    A = randomITensor(l, s, s', r)
-    Ainds = inds(A)
-    Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
-    @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
-    @test length(inds(R)) == 4 - ninds + 1
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
-    R, Q, q = rq(A, Ainds[1:ninds])
-    @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
-    @test length(inds(Q)) == 4 - ninds + 1
-    @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-  end
-
-  @testset "QR/RQ block sparse on MPS tensor with all possible collections on Q,R" for ninds in
-    [
-      0, 1, 2, 3
-    ]
-    expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0)]
-    expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -2), QN()]
-    l = dag(Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="l"))
-    s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
-    r = Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="r")
-    A = randomITensor(l, s, r)
-    @test flux(A) == QN("Sz", 0)
-    Ainds = inds(A)
-    Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
-    @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
-    @test length(inds(R)) == 3 - ninds + 1
-    @test flux(Q) == expected_Qflux[ninds + 1]
-    @test flux(R) == expected_Rflux[ninds + 1]
-    test_directions(A, Q, R, q)
-    @test A ≈ Q * R atol = 1e-13
-
-    # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
-    # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
-    # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
-    @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-    expected_Rflux = [QN(), QN("Sz", 2), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)]
-    expected_Qflux = [QN("Sz", 0), QN("Sz", -2), QN("Sz", -2), QN("Sz", 0), QN()]
-    R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
-    @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
-    @test length(inds(Q)) == 3 - ninds + 1
-    @test flux(Q) == expected_Qflux[ninds + 1]
-    @test flux(R) == expected_Rflux[ninds + 1]
-    test_directions(A, Q, R, q)
-    @test A ≈ Q * R atol = 1e-13
-    @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-  end
-
-  @testset "QR/RQ block sparse on MPO tensor with all possible collections on Q,R" for ninds in
-    [
-      0, 1, 2, 3, 4
-    ]
-    expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN("Sz", 0)]
-    expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN()]
-    l = dag(Index(QN("Sz", 0) => 3; tags="l"))
-    s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
-    r = Index(QN("Sz", 0) => 3; tags="r")
-    A = randomITensor(l, s, dag(s'), r)
-    @test flux(A) == QN("Sz", 0)
-    Ainds = inds(A)
-    Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
-    @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
-    @test length(inds(R)) == 4 - ninds + 1
-    @test flux(Q) == expected_Qflux[ninds + 1]
-    @test flux(R) == expected_Rflux[ninds + 1]
-    test_directions(A, Q, R, q)
-    @test A ≈ Q * R atol = 1e-13
-    # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
-    # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
-    # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
-    @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
-    expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", -2), QN("Sz", 0), QN("Sz", 0)]
-    expected_Rflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 2), QN("Sz", 0), QN()]
-    R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
-    @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
-    @test length(inds(Q)) == 4 - ninds + 1
-    @test flux(Q) == expected_Qflux[ninds + 1]
-    @test flux(R) == expected_Rflux[ninds + 1]
-    test_directions(A, Q, R, q)
-    @test A ≈ Q * R atol = 1e-13
-    @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-  end
-
-  @testset "QR/RQ dense with positive R" begin
-    l = Index(5, "l")
-    s = Index(2, "s")
-    r = Index(10, "r")
-    A = randomITensor(l, s, s', r)
-    Q, R, q = qr(A, l, s, s'; positive=true)
-    @test min(diag(R)...) > 0.0
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    R, Q, q = rq(A, r; positive=true)
-    @test min(diag(R)...) > 0.0
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-  end
-
-  @testset "QR/RQ block sparse with positive R" begin
-    l = dag(Index(QN("Sz", 0) => 3; tags="l"))
-    s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
-    r = Index(QN("Sz", 0) => 3; tags="r")
-    A = randomITensor(l, s, dag(s'), r)
-    Q, R, q = qr(A, l, s, s'; positive=true)
-    @test min(diag(R)...) > 0.0
-    test_directions(A, Q, R, q)
-    @test A ≈ Q * R atol = 1e-13
-    R, Q, q = rq(A, r; positive=true)
-    @test min(diag(R)...) > 0.0
-    test_directions(A, Q, R, q)
-    @test A ≈ Q * R atol = 1e-13
-  end
-
-  @testset "QR Heisenberg MPO tensors" begin
-    N = 4
-    sites = siteinds("S=1", N; conserve_qns=true)
-    ampo = OpSum()
-    for j in 1:(N - 1)
-      ampo .+= 0.5, "S+", j, "S-", j + 1
-      ampo .+= 0.5, "S-", j, "S+", j + 1
-      ampo .+= "Sz", j, "Sz", j + 1
-    end
-    H = MPO(ampo, sites; splitblocks=false)
-    for n in 1:(N - 1)
-      W = H[n]
-      @test flux(W) == QN("Sz", 0)
-      ilr = filterinds(W; tags="l=$n")[1]
-      ilq = noncommoninds(W, ilr)
-      Q, R, q = qr(W, ilq)
-      @test flux(Q) == QN("Sz", 4)
-      @test flux(R) == QN("Sz", -4)
-      test_directions(W, Q, R, q)
-      @test W ≈ Q * R atol = 1e-13
-      # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
-      # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
-      # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
-      @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
-      R, Q, q = rq(W, ilr)
-      @test flux(Q) == QN("Sz", -4)
-      @test flux(R) == QN("Sz", 4)
-      @test W ≈ Q * R atol = 1e-13
-      test_directions(W, Q, R, q)
-      @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
-      Q, L, q = ql(W, ilq)
-      @test flux(Q) == QN("Sz", -4)
-      @test flux(L) == QN("Sz", 4)
-      @test W ≈ Q * L atol = 1e-13
-      test_directions(W, Q, L, q)
-      @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
-      L, Q, q = lq(W, ilr)
-      @test flux(Q) == QN("Sz", 4)
-      @test flux(L) == QN("Sz", -4)
-      @test W ≈ Q * L atol = 1e-13
-      test_directions(W, Q, L, q)
-      @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
-      Q, L, q = ql(W, ilq)
-      @test flux(Q) == QN("Sz", -4)
-      @test flux(L) == QN("Sz", 4)
-      @test W ≈ Q * L atol = 1e-13
-      @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
-      L, Q, q = lq(W, ilr)
-      @test flux(Q) == QN("Sz", 4)
-      @test flux(L) == QN("Sz", -4)
-      @test W ≈ Q * L atol = 1e-13
-      @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-    end
-  end
-
-  @testset "factorize with QR" begin
-    l = Index(5, "l")
-    s = Index(2, "s")
-    r = Index(10, "r")
-    A = randomITensor(l, s, r)
-    Q, R, = factorize(A, l, s; which_decomp="qr")
-    q = commonind(Q, R)
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
-    R, Q, = factorize(A, l, s; which_decomp="qr", ortho="right")
-    q = commonind(Q, R)
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-  end
-
-  @testset "eigen" begin
-    i = Index(2, "i")
-    j = Index(2, "j")
-    A = randomITensor(i, i')
-    eigA = eigen(A)
-    Dt, Ut = eigen(NDTensors.tensor(A))
-    eigArr = eigen(array(A))
-    @test diag(array(eigA.D), 0) ≈ eigArr.values
-    @test diag(array(Dt), 0) == eigArr.values
-
-    @test_throws ArgumentError eigen(ITensor(NaN, i', i))
-    @test_throws ArgumentError eigen(ITensor(NaN, i', i); ishermitian=true)
-    @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i))
-    @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i); ishermitian=true)
-    @test_throws ArgumentError eigen(ITensor(Inf, i', i))
-    @test_throws ArgumentError eigen(ITensor(Inf, i', i); ishermitian=true)
-    @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i))
-    @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i); ishermitian=true)
-  end
-
-  @testset "exp function" begin
-    At = rand(10, 10)
-    k = Index(10, "k")
-    A = itensor(At + transpose(At), k, k')
-    @test Array(exp(Hermitian(NDTensors.tensor(A)))) ≈ exp(At + transpose(At))
-  end
-
-  @testset "Spectrum" begin
-    i = Index(100, "i")
-    j = Index(100, "j")
-
-    U, S, V = svd(rand(100, 100))
-    S ./= norm(S)
-    A = itensor(U * ITensors.diagm(0 => S) * V', i, j)
-
-    spec = svd(A, i).spec
-
-    @test eigs(spec) ≈ S .^ 2
-    @test truncerror(spec) == 0.0
-
-    spec = svd(A, i; maxdim=length(S) - 3).spec
-    @test truncerror(spec) ≈ sum(S[(end - 2):end] .^ 2)
-
-    @test entropy(Spectrum([0.5; 0.5], 0.0)) == log(2)
-    @test entropy(Spectrum([1.0], 0.0)) == 0.0
-    @test entropy(Spectrum([0.0], 0.0)) == 0.0
-
-    @test isnothing(eigs(Spectrum(nothing, 1.0)))
-    @test_throws ErrorException entropy(Spectrum(nothing, 1.0))
-    @test truncerror(Spectrum(nothing, 1.0)) == 1.0
-  end
-
-  @testset "Eigen QN flux regression test" begin
-    cutoff = 1E-12
-    N = 4
-    s = siteinds("S=1", N; conserve_qns=true)
-    A = randomITensor(QN("Sz", 2), s[1], s[2], s[3])
-
-    R = A * dag(prime(A, s[1], s[2]))
-    F = eigen(R, (s[1], s[2]), (s[1]', s[2]'))
-
-    @test flux(F.Vt) == QN("Sz", 0)
-  end
-
-  @testset "SVD block_mindim keyword" begin
-    i = Index(
-      [
-        QN("Sz", 4) => 1,
-        QN("Sz", 2) => 4,
-        QN("Sz", 0) => 6,
-        QN("Sz", -2) => 4,
-        QN("Sz", -4) => 1,
-      ],
-      "i",
-    )
-    j = sim(i)
-    X = randomITensor(QN("Sz", 0), i, j)
-
-    min_blockdim = 2
-    U, S, V = svd(X, i; cutoff=1E-1, min_blockdim)
-    u = commonind(S, U)
-
-    @test nblocks(u) == nblocks(i)
-    for b in 1:nblocks(u)
-      @test blockdim(u, b) == blockdim(i, b) || blockdim(u, b) >= min_blockdim
-    end
-  end
-end
-
-nothing

From 19615201769909787aea790562dd2b8b8bbb27ed Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Tue, 21 Mar 2023 16:39:54 -0600
Subject: [PATCH 74/90] Set Random seed to keep tests deterministic.

---
 NDTensors/test/linearalgebra.jl | 15 +++++++++------
 test/base/test_decomp.jl        |  4 +++-
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index 8325025d59..98a9fe7e72 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -1,6 +1,9 @@
 using NDTensors
 using LinearAlgebra
 using Test
+using Random
+
+Random.seed!(314159)
 
 @testset "random_orthog" begin
   n, m = 10, 4
@@ -40,7 +43,7 @@ end
   # We want to test 0.0 on the diagonal. We need to make all rows linearly dependent to
   # guarantee this with numerical roundoff.
   if singular
-    for i in 2:n
+    for i in 2:3
       A[i, :] = A[1, :] * 1.05^n
     end
   end
@@ -59,8 +62,8 @@ end
     @test all(imag(diagX) .== 0.0)
   end
   if rr_cutoff > 0 && singular
-    @test dim(Q, 2) == 1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
-    @test dim(X, 1) == 1 #Redundant?
+    @test dim(Q, 2) == 2 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
+    @test dim(X, 1) == 2 #Redundant?
   end
   #
   # Tall matrix (more rows than cols)
@@ -68,7 +71,7 @@ end
   A = randomTensor(elt, (m, n)) #Tall array
   # We want to test 0.0 on the diagonal. We need to make all rows equal to guarantee this with numerical roundoff.
   if singular
-    for i in 2:m
+    for i in 2:4
       A[i, :] = A[1, :]
     end
   end
@@ -84,8 +87,8 @@ end
     @test all(imag(diagX) .== 0.0)
   end
   if rr_cutoff > 0 && singular
-    @test dim(Q, 2) == 1 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
-    @test dim(X, 1) == 1 #Redundant?
+    @test dim(Q, 2) == 4 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
+    @test dim(X, 1) == 4 #Redundant?
   end
 end
diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl
index 1c847bbbdf..517944226d 100644
--- a/test/base/test_decomp.jl
+++ b/test/base/test_decomp.jl
@@ -1,4 +1,6 @@
-using ITensors, LinearAlgebra, Test
+using ITensors, LinearAlgebra, Test, Random
+
+Random.seed!(314159)
 
 #
 # Decide if rank 2 tensor is upper triangular, i.e. all zeros below the diagonal.
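Seeding the generator, as PATCH 74 does, makes the randomized singular and rank-deficient constructions reproducible from run to run; re-seeding restores the identical stream. A tiny sketch:

using Random

Random.seed!(314159) # the same seed used in the test files
x = rand(3)
Random.seed!(314159)
@assert x == rand(3) # identical draws after re-seeding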
From 491b830989a87088cdefa217c6570306e3375c74 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Tue, 21 Mar 2023 16:55:35 -0600
Subject: [PATCH 75/90] Can't use Random on the NDTensors CI machine

---
 NDTensors/test/linearalgebra.jl | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index 98a9fe7e72..5222d214ed 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -1,9 +1,10 @@
 using NDTensors
 using LinearAlgebra
 using Test
-using Random
 
-Random.seed!(314159)
+# Not available on CI machine that tests NDTensors.
+# using Random
+# Random.seed!(314159)
 
 @testset "random_orthog" begin
   n, m = 10, 4

From fdce8fe9a7117f057737311d675398d25fa90ea9 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 13 Apr 2023 12:51:49 -0600
Subject: [PATCH 76/90] Fix names cutoff and verbose

Take out the rr prefixes.

---
 NDTensors/src/linearalgebra/linearalgebra.jl | 16 ++++++++--------
 NDTensors/test/linearalgebra.jl              | 12 ++++++------
 test/base/test_decomp.jl                     |  8 ++++----
 3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index 63956ca7d5..4ac9fbeec5 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -367,11 +367,11 @@ function LinearAlgebra.eigen(
   return D, V, spec
 end
 #
-# Trim out zero rows of R/X within tolerance rr_cutoff. Also trim the corresponding columns
+# Trim out zero rows of R/X within tolerance cutoff. Also trim the corresponding columns
 # of Q. X = R or L
 #
 function trim_rows(
-  Q::AbstractMatrix, X::AbstractMatrix, rr_cutoff::Float64; rr_verbose=false, kwargs...
+  Q::AbstractMatrix, X::AbstractMatrix, cutoff::Float64; verbose=false, kwargs...
 )
   #
   # Find and count the zero rows.  Bail out if there are none.
   #
   Xnr, Xnc = size(X)
   Qnr, Qnc = size(Q)
   @assert Xnr == Qnc #Sanity check.
-  zeros = map((r) -> (maximum(abs.(X[r, 1:Xnc])) <= rr_cutoff), 1:Xnr)
+  zeros = map((r) -> (maximum(abs.(X[r, 1:Xnc])) <= cutoff), 1:Xnr)
   num_zero_rows = sum(zeros)
   if num_zero_rows == 0
     return Q, X
   end
   #
   # Useful output for troubleshooting.
   #
-  if rr_verbose
+  if verbose
     println(
-      "Rank Reveal removing $num_zero_rows rows with log10(rr_cutoff)=$(log10(rr_cutoff))"
+      "Rank Reveal removing $num_zero_rows rows with log10(cutoff)=$(log10(cutoff))"
     )
   end
   #
@@ -427,7 +427,7 @@ end
 # Generic function for qr and ql decomposition of dense matrix.
 # The X tensor = R or L.
 #
-function qx(qx::Function, T::DenseTensor{<:Any,2}; rr_cutoff=-1.0, kwargs...)
+function qx(qx::Function, T::DenseTensor{<:Any,2}; cutoff=-1.0, kwargs...)
   QM1, XM = qx(matrix(T))
   # When qx=qr typeof(QM1)==LinearAlgebra.QRCompactWYQ
   # When qx=ql typeof(QM1)==Matrix and this should be a no-op
   QM = Matrix(QM1)
   #
   # Do row removal for rank revealing QR/QL. Probably not worth it to eliminate the if statement
   #
-  if rr_cutoff >= 0.0
-    QM, XM = trim_rows(QM, XM, rr_cutoff; kwargs...)
+  if cutoff >= 0.0
+    QM, XM = trim_rows(QM, XM, cutoff; kwargs...)
   end
   #
   # Make the new indices to go onto Q and X
diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index 5222d214ed..79d52bbe26 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -35,7 +35,7 @@ end
 
   eps = Base.eps(real(elt)) * 30
   #this is set rather tight, so if you increase/change m,n you may have to open up the tolerance on eps.
-  rr_cutoff = rank_reveal ? eps * 1.0 : -1.0
+  cutoff = rank_reveal ? eps * 1.0 : -1.0
   n, m = 4, 8
   #
   # Wide matrix (more columns than rows)
@@ -48,8 +48,8 @@ end
       A[i, :] = A[1, :] * 1.05^n
     end
   end
-  # you can set rr_verbose=true if you want to get debug output on rank reduction.
-  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff, rr_verbose=false) #X is R or L.
+  # you can set verbose=true if you want to get debug output on rank reduction.
+  Q, X = qx(A; positive=positive, cutoff=cutoff, verbose=false) #X is R or L.
   @test A ≈ Q * X atol = eps
   @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
   if dim(Q, 1) == dim(Q, 2)
@@ -62,7 +62,7 @@ end
     @test all(real(diagX) .>= 0.0)
     @test all(imag(diagX) .== 0.0)
   end
-  if rr_cutoff > 0 && singular
+  if cutoff > 0 && singular
     @test dim(Q, 2) == 2 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
     @test dim(X, 1) == 2 #Redundant?
   end
@@ -76,7 +76,7 @@ end
       A[i, :] = A[1, :]
     end
   end
-  Q, X = qx(A; positive=positive, rr_cutoff=rr_cutoff, rr_verbose=false)
+  Q, X = qx(A; positive=positive, cutoff=cutoff, verbose=false)
   @test A ≈ Q * X atol = eps
   @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
   #@test array(Q) * array(Q)'  no such relationship for tall matrices.
@@ -87,7 +87,7 @@ end
     @test all(real(diagX) .>= 0.0)
     @test all(imag(diagX) .== 0.0)
   end
-  if rr_cutoff > 0 && singular
+  if cutoff > 0 && singular
     @test dim(Q, 2) == 4 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X).
     @test dim(X, 1) == 4 #Redundant?
   end
diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl
index 517944226d..99dcc89535 100644
--- a/test/base/test_decomp.jl
+++ b/test/base/test_decomp.jl
@@ -399,22 +399,22 @@ end
     Ainds = inds(A)
     A = rank_fix(A, Ainds[1:ninds]) #make all columns linearly dependent on column 1, so rank==1.
-    Q, R, q = qr(A, Ainds[1:ninds]; rr_cutoff=1e-12)
+    Q, R, q = qr(A, Ainds[1:ninds]; cutoff=1e-12)
     @test dim(q) == 1 #check that we found rank==1
     @test A ≈ Q * R atol = 1e-13
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
 
-    R, Q, q = rq(A, Ainds[1:ninds]; rr_cutoff=1e-12)
+    R, Q, q = rq(A, Ainds[1:ninds]; cutoff=1e-12)
     @test dim(q) == 1 #check that we found rank==1
     @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
 
-    L, Q, q = lq(A, Ainds[1:ninds]; rr_cutoff=1e-12)
+    L, Q, q = lq(A, Ainds[1:ninds]; cutoff=1e-12)
     @test dim(q) == 1 #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
 
-    Q, L, q = ql(A, Ainds[1:ninds]; rr_cutoff=1e-12)
+    Q, L, q = ql(A, Ainds[1:ninds]; cutoff=1e-12)
     @test dim(q) == 1 #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13

From 543d355a7c32fe2738336e6e3798e09b4d5c3021 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 13 Apr 2023 13:42:59 -0600
Subject: [PATCH 77/90] Unify positive gauge fix for qr/ql

---
 NDTensors/src/linearalgebra/linearalgebra.jl | 76 +++++---------------
 1 file changed, 19 insertions(+), 57 deletions(-)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index 4ac9fbeec5..9d741d7df4 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -413,26 +413,22 @@ function trim_rows(
   return Q1, X1
 end

-function qr(T::DenseTensor{<:Any,2}; positive=false, kwargs...)
-  qxf = positive ? qr_positive : qr
-  return qx(qxf, T; kwargs...)
-end
-
-function ql(T::DenseTensor{<:Any,2}; positive=false, kwargs...)
-  qxf = positive ? ql_positive : ql
-  return qx(qxf, T; kwargs...)
-end
-
+qr(T::DenseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...)
+ql(T::DenseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...)
 #
 # Generic function for qr and ql decomposition of dense matrix.
 # The X tensor = R or L.
 #
-function qx(qx::Function, T::DenseTensor{<:Any,2}; cutoff=-1.0, kwargs...)
+function qx(qx::Function, T::DenseTensor{<:Any,2}; positive=false, cutoff=-1.0, kwargs...)
   QM1, XM = qx(matrix(T))
   # When qx=qr typeof(QM1)==LinearAlgebra.QRCompactWYQ
   # When qx=ql typeof(QM1)==Matrix and this should be a no-op
   QM = Matrix(QM1)
   #
+  # Gauge fix diagonal of X into positive definite form.
+  #
+  positive && qx_positive!(qx,QM,XM)
+  #
   # Do row removal for rank revealing QR/QL. Probably not worth it to eliminate the if statement
   #
   if cutoff >= 0.0
@@ -455,56 +449,22 @@ end

 #
-# Just flip signs between Q and R to get all the diagonals of R >=0.
-# For rectangular M the indexing for "diagonal" is non-trivial.
+# Semi-generic function for gauge fixing the diagonal of X into positive definite form.
+# Because the diagonal is difficult to locate for rectangular X (it moves between R and L)
+# we use qx==ql to know if X is lower or upper.
 #
-"""
-  qr_positive(M::AbstractMatrix)
-
-Compute the QR decomposition of a matrix M
-such that the diagonal elements of R are
-non-negative. Such a QR decomposition of a
-matrix is unique. Returns a tuple (Q,R).
-"""
-function qr_positive(M::AbstractMatrix)
-  sparseQ, R = qr(M)
-  Q = convert(Matrix, sparseQ)
-  nc = size(Q, 2)
-  for c in 1:nc
-    if R[c, c] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q.
-      sign_Rc = sign(R[c, c])
-      if !isone(sign_Rc)
-        R[c, c:end] *= conj(sign_Rc) #only flip non-zero portion of the row.
-        Q[:, c] *= sign_Rc
-      end
-    end
-  end
-  return (Q, R)
-end
-
-"""
-  ql_positive(M::AbstractMatrix)
-
-Compute the QL decomposition of a matrix M
-such that the diagonal elements of L are
-non-negative. Such a QL decomposition of a
-matrix is unique. Returns a tuple (Q,L).
-"""
-function ql_positive(M::AbstractMatrix)
-  sparseQ, L = ql(M)
-  Q = convert(Matrix, sparseQ)
-  nr, nc = size(L)
-  dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr
-  for c in 1:(nc - dc)
-    if L[c, c + dc] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q.
-      sign_Lc = sign(L[c, c + dc])
-      if c <= nr && !isone(sign_Lc)
-        L[c, 1:(c + dc)] *= sign_Lc #only flip non-zero portion of the column.
-        Q[:, c] *= conj(sign_Lc)
+function qx_positive!(qx::Function, Q::AbstractMatrix,X::AbstractMatrix)
+  nr, nc = size(X)
+  dc = (nc > nr && qx==ql) ? nc - nr : 0 #diag is shifted over by dc if nc>nr
+  for c in 1:Base.min(nr,nc)
+    if X[c, c + dc] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q.
+      sign_Xc = sign(X[c, c + dc])
+      if !isone(sign_Xc)
+        X[c, :] *= sign_Xc
+        Q[:, c] *= conj(sign_Xc)
       end
     end
   end
-  return (Q, L)
 end

 #
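One detail the unified qx_positive! above has to get right: for a wide matrix, QL produces a lower-trapezoidal L whose zeros sit in the upper right, so the "diagonal" being gauge-fixed is shifted right by dc = nc - nr columns. A small concrete illustration with plain arrays (a 2 x 4 lower-trapezoidal L of the shape a wide QL produces):

# For nr = 2, nc = 4 the trapezoidal pattern is
#   [ * * d 0 ]
#   [ * * * d ]
# with the gauge-relevant "diagonal" entries d at L[c, c + dc].
L = [1.0 2.0 3.0 0.0; 4.0 5.0 6.0 7.0]
nr, nc = size(L)
dc = nc > nr ? nc - nr : 0 # here dc == 2
@assert [L[c, c + dc] for c in 1:nr] == [3.0, 7.0]

For tall or square inputs dc == 0 and the same loop reduces to the ordinary diagonal, which is what lets a single routine serve both qr and ql.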
From 4d22400a06d643c60407f9e7fb53d87882c0459d Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 13 Apr 2023 15:45:06 -0600
Subject: [PATCH 78/90] Implement column pivoting with row removal

---
 NDTensors/src/linearalgebra/linearalgebra.jl | 85 ++++++++++----------
 NDTensors/test/linearalgebra.jl              | 17 ++--
 test/base/test_decomp.jl                     | 11 +--
 3 files changed, 56 insertions(+), 57 deletions(-)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index 9d741d7df4..d75575cea7 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -367,22 +367,27 @@ function LinearAlgebra.eigen(
   return D, V, spec
 end
 #
-# Trim out zero rows of R/X within tolerance cutoff. Also trim the corresponding columns
-# of Q. X = R or L
+# Trim out n rows of R based on norm(R_nn)<cutoff, where R_nn is the lower right corner of R.
+# Also trim the corresponding columns of Q.  Requires that the rows of R are sorted by
+# decreasing norm, which column-pivoted QR guarantees.
 #
 function trim_rows(
-  Q::AbstractMatrix, X::AbstractMatrix, cutoff::Float64; verbose=false, kwargs...
+  Q::AbstractMatrix, R::AbstractMatrix, cutoff::Float64; verbose=false, kwargs...
 )
-  #
-  # Find and count the zero rows.  Bail out if there are none.
-  #
-  Xnr, Xnc = size(X)
-  Qnr, Qnc = size(Q)
-  @assert Xnr == Qnc #Sanity check.
-  zeros = map((r) -> (maximum(abs.(X[r, 1:Xnc])) <= cutoff), 1:Xnr)
-  num_zero_rows = sum(zeros)
+  nr = size(R, 1)
+  last_row_to_keep=nr
+  for r in nr:-1:1
+    if norm(R[r:nr,:])>cutoff
+      last_row_to_keep=r
+      break
+    end
+  end
+
+  num_zero_rows=nr-last_row_to_keep
   if num_zero_rows == 0
-    return Q, X
+    return Q, R
   end
   #
   # Useful output for troubleshooting.
   #
   if verbose
     println(
       "Rank Reveal removing $num_zero_rows rows with log10(cutoff)=$(log10(cutoff))"
     )
   end
-  #
-  # Create new Q & X matrices with reduced size.
-  #
-  ...
-  return Q1, X1
+
+  return Q[:,1:last_row_to_keep], R[1:last_row_to_keep,:]
 end

 qr(T::DenseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...)
 ql(T::DenseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...)
 #
 # Generic function for qr and ql decomposition of dense matrix.
 # The X tensor = R or L.
 #
-function qx(qx::Function, T::DenseTensor{<:Any,2}; positive=false, cutoff=-1.0, kwargs...)
-  QM1, XM = qx(matrix(T))
-  # When qx=qr typeof(QM1)==LinearAlgebra.QRCompactWYQ
-  # When qx=ql typeof(QM1)==Matrix and this should be a no-op
-  QM = Matrix(QM1)
+function qx(qx::Function, T::DenseTensor{<:Any,2}; positive=false, pivot=false, cutoff=-1.0, verbose=false,kwargs...)
+  do_rank_reduction = cutoff>=0.0
+  if do_rank_reduction && qx==ql
+    @warn "User requested ql decomposition with cutoff=$cutoff." *
+      " Rank reduction requires column pivoting which is not supported for ql decomposition in lapack/ITensors"
+    do_rank_reduction=false
+  end
+  if pivot && qx==ql
+    @warn "User requested ql decomposition with column pivoting." *
+      " Column pivoting is not supported for ql decomposition in lapack/ITensors"
+    pivot=false
+  end
+  pivot=do_rank_reduction
+
+  if pivot
+    QM, XM, p = qx(matrix(T),Val(true)) #with column pivoting
+    QM, XM = trim_rows(Matrix(QM), XM, cutoff;verbose=verbose)
+  else
+    QM, XM = qx(matrix(T),Val(false)) #no column pivoting
+    QM = Matrix(QM)
+  end
   #
   # Gauge fix diagonal of X into positive definite form.
   #
   positive && qx_positive!(qx,QM,XM)
   #
-  # Do row removal for rank revealing QR/QL. Probably not worth it to eliminate the if statement
+  # undo the permutation on R, so that T=Q*R again.
   #
-  if cutoff >= 0.0
-    QM, XM = trim_rows(QM, XM, cutoff; kwargs...)
-  end
+  pivot && (XM=XM[:,invperm(p)])
   #
   # Make the new indices to go onto Q and X
   #
@@ -473,7 +478,8 @@ end
 # Lapack replaces A with Q & L carefully packed together. So here we just copy A
 # before letting lapack overwrite it.
 #
-function ql(A::AbstractMatrix; kwargs...)
+function ql(A::AbstractMatrix,pivot; kwargs...)
+  @assert pivot==Val(false)
   Base.require_one_based_indexing(A)
   T = eltype(A)
   AA = similar(A, LinearAlgebra._qreltype(T), size(A))
diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index 79d52bbe26..9a162688c6 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -31,9 +31,14 @@ end
   elt in [Float64, ComplexF64, Float32, ComplexF32],
   positive in [false, true],
   singular in [false, true],
-  rank_reveal in [false, true],
+  rank_reveal in [false,true],
+  pivot in [false,true]
+
+  if qx==ql && (rank_reveal || pivot)
+    continue
+  end
 
   eps = Base.eps(real(elt)) * 30
   #this is set rather tight, so if you increase/change m,n you may have to open up the tolerance on eps.
   cutoff = rank_reveal ? eps * 1.0 : -1.0
   n, m = 4, 8
@@ -48,13 +53,13 @@ end
     end
   end
   # you can set verbose=true if you want to get debug output on rank reduction.
-  Q, X = qx(A; positive=positive, cutoff=cutoff, verbose=false) #X is R or L.
+  Q, X = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false) #X is R or L.
   @test A ≈ Q * X atol = eps
   @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
   if dim(Q, 1) == dim(Q, 2)
     @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, min(n, m))) atol = eps
   end
-  if positive
+  if positive && !rank_reveal
     nr, nc = size(X)
     dr = qx == ql ? Base.max(0, nc - nr) : 0
     diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right.
@@ -76,11 +81,11 @@ end
       A[i, :] = A[1, :]
     end
   end
-  Q, X = qx(A; positive=positive, cutoff=cutoff, verbose=false)
+  Q, X = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false)
   @test A ≈ Q * X atol = eps
   @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
   #@test array(Q) * array(Q)'  no such relationship for tall matrices.
-  if positive
+  if positive && !rank_reveal
     nr, nc = size(X)
     dr = qx == ql ? Base.max(0, nc - nr) : 0
     diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right.
diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl
index 99dcc89535..cb9d10107e 100644
--- a/test/base/test_decomp.jl
+++ b/test/base/test_decomp.jl
@@ -388,7 +388,7 @@ end
     @test A ≈ Q * L atol = 1e-13
   end
 
-  @testset "Rank revealing QR/RQ/QL/LQ decomp on MPS dense $elt tensor" for ninds in
-    [1, 2, 3],
+  @testset "Rank revealing QR/LQ decomp on MPS dense $elt tensor" for ninds in [1, 2, 3],
     elt in [Float64, ComplexF64]
 
     l = Index(5, "l")
@@ -404,20 +404,11 @@ end
     @test A ≈ Q * R atol = 1e-13
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
 
-    R, Q, q = rq(A, Ainds[1:ninds]; cutoff=1e-12)
-    @test dim(q) == 1 #check that we found rank==1
-    @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
     L, Q, q = lq(A, Ainds[1:ninds]; cutoff=1e-12)
     @test dim(q) == 1 #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
-    Q, L, q = ql(A, Ainds[1:ninds]; cutoff=1e-12)
-    @test dim(q) == 1 #check that we found rank==1
-    @test A ≈ Q * L atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
   end
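Before the formatting pass that follows, the pivoting strategy of PATCH 78 in isolation: LAPACK's column-pivoted QR factors A[:, p] = Q * R with the magnitudes of diag(R) non-increasing, so negligible rows of R collect at the bottom, where they can be trimmed by a norm(R[r:end, :]) test; applying invperm(p) to the columns of the trimmed R then restores A ≈ Q * R. A minimal dense sketch with stock LinearAlgebra (the patch itself uses the older qr(M, Val(true)) spelling; ColumnNorm() is the current one):

using LinearAlgebra

A = rand(8, 4) * rand(4, 8) # 8 x 8 with numerical rank 4 (almost surely)
F = qr(A, ColumnNorm()) # pivoted: A[:, F.p] == Q * R
Q, R, p = Matrix(F.Q), F.R, F.p
# Trailing rows of R below the cutoff contribute nothing to Q * R.
keep = findlast(r -> norm(R[r:end, :]) > 1e-12, 1:size(R, 1))
Q, R = Q[:, 1:keep], R[1:keep, :]
R = R[:, invperm(p)] # undo the column permutation
@assert A ≈ Q * R
@assert size(Q, 2) == 4 # the numerical rank was revealed

This is also why the @warn branches above disable the feature for ql: LAPACK only provides the pivoted variant for QR.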
From 1540aac516b7791a0d72b1130457c4fb19af37fe Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 13 Apr 2023 15:52:24 -0600
Subject: [PATCH 79/90] Format

---
 NDTensors/src/linearalgebra/linearalgebra.jl | 75 +++++++++++---------
 NDTensors/test/linearalgebra.jl              | 10 +--
 test/base/test_decomp.jl                     |  4 +-
 3 files changed, 46 insertions(+), 43 deletions(-)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index d75575cea7..ffae6c96c4 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -370,22 +370,21 @@ end
 function trim_rows(
   Q::AbstractMatrix, R::AbstractMatrix, cutoff::Float64; verbose=false, kwargs...
 )
   nr = size(R, 1)
-  last_row_to_keep=nr
-  for r in nr:-1:1
-    if norm(R[r:nr,:])>cutoff
-      last_row_to_keep=r
+  last_row_to_keep = nr
+  for r in nr:-1:1
+    if norm(R[r:nr, :]) > cutoff
+      last_row_to_keep = r
       break
     end
   end
 
-  num_zero_rows=nr-last_row_to_keep
+  num_zero_rows = nr - last_row_to_keep
   if num_zero_rows == 0
     return Q, R
   end
   #
   # Useful output for troubleshooting.
   #
   if verbose
-    println(
-      "Rank Reveal removing $num_zero_rows rows with log10(cutoff)=$(log10(cutoff))"
-    )
+    println("Rank Reveal removing $num_zero_rows rows with log10(cutoff)=$(log10(cutoff))")
   end
 
-  return Q[:,1:last_row_to_keep], R[1:last_row_to_keep,:]
+  return Q[:, 1:last_row_to_keep], R[1:last_row_to_keep, :]
 end
 
 qr(T::DenseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...)
 ql(T::DenseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...)
 #
 # Generic function for qr and ql decomposition of dense matrix.
 # The X tensor = R or L.
 #
-function qx(qx::Function, T::DenseTensor{<:Any,2}; positive=false, pivot=false, cutoff=-1.0, verbose=false,kwargs...)
-  do_rank_reduction = cutoff>=0.0
-  if do_rank_reduction && qx==ql
+function qx(
+  qx::Function,
+  T::DenseTensor{<:Any,2};
+  positive=false,
+  pivot=false,
+  cutoff=-1.0,
+  verbose=false,
+  kwargs...,
+)
+  do_rank_reduction = cutoff >= 0.0
+  if do_rank_reduction && qx == ql
     @warn "User requested ql decomposition with cutoff=$cutoff." *
       " Rank reduction requires column pivoting which is not supported for ql decomposition in lapack/ITensors"
-    do_rank_reduction=false
+    do_rank_reduction = false
   end
-  if pivot && qx==ql
+  if pivot && qx == ql
     @warn "User requested ql decomposition with column pivoting." *
       " Column pivoting is not supported for ql decomposition in lapack/ITensors"
-    pivot=false
+    pivot = false
   end
-  pivot=do_rank_reduction
-
+  pivot = do_rank_reduction
+
   if pivot
-    QM, XM, p = qx(matrix(T),Val(true)) #with column pivoting
-    QM, XM = trim_rows(Matrix(QM), XM, cutoff;verbose=verbose)
+    QM, XM, p = qx(matrix(T), Val(true)) #with column pivoting
+    QM, XM = trim_rows(Matrix(QM), XM, cutoff; verbose=verbose)
   else
-    QM, XM = qx(matrix(T),Val(false)) #no column pivoting
+    QM, XM = qx(matrix(T), Val(false)) #no column pivoting
     QM = Matrix(QM)
   end
   #
   # Gauge fix diagonal of X into positive definite form.
   #
-  positive && qx_positive!(qx,QM,XM)
+  positive && qx_positive!(qx, QM, XM)
   #
   # undo the permutation on R, so that T=Q*R again.
   #
-  pivot && (XM=XM[:,invperm(p)])
+  pivot && (XM = XM[:, invperm(p)])
   #
   # Make the new indices to go onto Q and X
   #
@@ -460,14 +460,14 @@ end
 # Because the diagonal is difficult to locate for rectangular X (it moves between R and L)
 # we use qx==ql to know if X is lower or upper.
 #
-function qx_positive!(qx::Function, Q::AbstractMatrix,X::AbstractMatrix)
+function qx_positive!(qx::Function, Q::AbstractMatrix, X::AbstractMatrix)
   nr, nc = size(X)
-  dc = (nc > nr && qx==ql) ? nc - nr : 0 #diag is shifted over by dc if nc>nr
-  for c in 1:Base.min(nr,nc)
+  dc = (nc > nr && qx == ql) ? nc - nr : 0 #diag is shifted over by dc if nc>nr
+  for c in 1:Base.min(nr, nc)
     if X[c, c + dc] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q.
       sign_Xc = sign(X[c, c + dc])
       if !isone(sign_Xc)
-        X[c, :] *= sign_Xc
+        X[c, :] *= sign_Xc
         Q[:, c] *= conj(sign_Xc)
       end
     end
@@ -478,8 +478,8 @@ end
 # Lapack replaces A with Q & L carefully packed together. So here we just copy A
 # before letting lapack overwrite it.
 #
-function ql(A::AbstractMatrix,pivot; kwargs...)
-  @assert pivot==Val(false)
+function ql(A::AbstractMatrix, pivot; kwargs...)
+  @assert pivot == Val(false)
   Base.require_one_based_indexing(A)
   T = eltype(A)
   AA = similar(A, LinearAlgebra._qreltype(T), size(A))
diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index 9a162688c6..740266411c 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -31,10 +31,10 @@ end
   elt in [Float64, ComplexF64, Float32, ComplexF32],
   positive in [false, true],
   singular in [false, true],
-  rank_reveal in [false,true],
-  pivot in [false,true]
-
-  if qx==ql && (rank_reveal || pivot)
+  rank_reveal in [false, true],
+  pivot in [false, true]
+
+  if qx == ql && (rank_reveal || pivot)
     continue
   end
 
@@ -85,7 +85,7 @@ end
   @test A ≈ Q * X atol = eps
   @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
   #@test array(Q) * array(Q)'  no such relationship for tall matrices.
-  if positive && !rank_reveal
+  if positive && !rank_reveal
     nr, nc = size(X)
     dr = qx == ql ? Base.max(0, nc - nr) : 0
     diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right.
diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl
index cb9d10107e..e0471c6787 100644
--- a/test/base/test_decomp.jl
+++ b/test/base/test_decomp.jl
@@ -388,8 +388,7 @@ end
     @test A ≈ Q * L atol = 1e-13
   end
 
-  @testset "Rank revealing QR/LQ decomp on MPS dense $elt tensor" for ninds in
-    [1, 2, 3],
+  @testset "Rank revealing QR/LQ decomp on MPS dense $elt tensor" for ninds in [1, 2, 3],
     elt in [Float64, ComplexF64]
 
     l = Index(5, "l")
@@ -408,7 +407,6 @@ end
     @test dim(q) == 1 #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
   end

From 0cdc115c33dc34b65e369236bf57ffe93d47e0c4 Mon Sep 17 00:00:00 2001
From: Jan Reimers
Date: Thu, 13 Apr 2023 17:02:59 -0600
Subject: [PATCH 80/90] Fix some unit test failures

Factorize likes to pass cutoff=nothing down into qr.

---
 NDTensors/src/linearalgebra/linearalgebra.jl | 28 ++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index ffae6c96c4..f4ec30771d 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -413,6 +413,9 @@ function qx(
   verbose=false,
   kwargs...,
 )
+  if isnothing(cutoff)
+    cutoff = -1.0
+  end
   do_rank_reduction = cutoff >= 0.0
   if do_rank_reduction && qx == ql
     @warn "User requested ql decomposition with cutoff=$cutoff." *
@@ -455,6 +458,31 @@ function qx(
   return Q, X
 end
 
+# Required by svd_recursive
+"""
+  qr_positive(M::AbstractMatrix)
+
+Compute the QR decomposition of a matrix M
+such that the diagonal elements of R are
+non-negative. Such a QR decomposition of a
+matrix is unique. Returns a tuple (Q,R).
+"""
+function qr_positive(M::AbstractMatrix)
+  sparseQ, R = qr(M)
+  Q = convert(Matrix, sparseQ)
+  nc = size(Q, 2)
+  for c in 1:nc
+    if R[c, c] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q.
+      sign_Rc = sign(R[c, c])
+      if !isone(sign_Rc)
+        R[c, c:end] *= conj(sign_Rc) #only flip non-zero portion of the row.
+        Q[:, c] *= sign_Rc
+      end
+    end
+  end
+  return (Q, R)
+end
+
 #
 # Semi-generic function for gauge fixing the diagonal of X into positive definite form.
return nothing end - Q, X = QXb + Q, X, perm = QXb Qs[jj] = Q Xs[jj] = X + !isnothing(perm) && push!(perms, perm) #save permutation vector for each block. + end + + if length(perms) == 0 + perms = nothing end # @@ -358,7 +364,7 @@ function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) blockview(X, nzblocksX[n]) .= Xs[n] end - return Q, X + return Q, X, perms end function exp( diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index f4ec30771d..a355171b3c 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -427,7 +427,9 @@ function qx( " Column pivoting is not supported for ql decomposition in lapack/ITensors" pivot = false end - pivot = do_rank_reduction + if do_rank_reduction + pivot = true + end if pivot QM, XM, p = qx(matrix(T), Val(true)) #with colun pivoting @@ -435,6 +437,7 @@ function qx( else QM, XM = qx(matrix(T), Val(false)) #no column pivoting QM = Matrix(QM) + p = nothing end # # Gauge fix diagonal of X into positive definite form. @@ -455,7 +458,7 @@ function qx( Xinds = IndsT((q, ind(T, 2))) Q = tensor(Dense(vec(QM)), Qinds) X = tensor(Dense(vec(XM)), Xinds) - return Q, X + return Q, X, p end # Required by svd_recursive diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 740266411c..df9f7871ce 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -24,8 +24,8 @@ end @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14 end -@testset "Dense $qx decomposition, elt=$elt, positve=$positive, singular=$singular, rank_reveal=$rank_reveal" for qx in - [ +@testset "Dense $qx decomposition, elt=$elt, positve=$positive, singular=$singular, rank_reveal=$rank_reveal, pivot=$pivot" for qx in + [ qr, ql ], elt in [Float64, ComplexF64, Float32, ComplexF32], @@ -54,23 +54,35 @@ end end end # you can set verbose=true if you want to get debug output on rank reduction. - Q, X = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false) #X is R or L. + Q, X, p = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false) #X is R or L. @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps if dim(Q, 1) == dim(Q, 2) @test array(Q) * array(Q)' ≈ Diagonal(fill(1.0, min(n, m))) atol = eps end - if positive && !rank_reveal + if positive && !rank_reveal && !pivot nr, nc = size(X) dr = qx == ql ? Base.max(0, nc - nr) : 0 diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. @test all(real(diagX) .>= 0.0) @test all(imag(diagX) .== 0.0) end - if cutoff > 0 && singular + if positive && !isnothing(p) + Xp = X[:, p] #permute columns so diag gets restored to the right place. + nr, nc = size(Xp) + dr = qx == ql ? Base.max(0, nc - nr) : 0 + diagX = diag(Xp[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. + @test all(real(diagX) .>= 0.0) + @test all(imag(diagX) .== 0.0) + end + + if cutoff >= 0 && singular @test dim(Q, 2) == 2 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X). @test dim(X, 1) == 2 #Redundant? 
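     # Reminder on the returned permutation: the raw pivoted factorization is
     # A[:, p] = Q * Xp; qx() hands X back already un-permuted
     # (X = Xp[:, invperm(p)]), so A ≈ Q * X holds directly, and X[:, p] above
     # recovers the column order whose diagonal was gauge fixed.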
end + if (cutoff >= 0.0 || pivot) && qx == qr + @test !isnothing(p) + end # # Tall matrix (more rows than cols) # @@ -81,21 +93,32 @@ end A[i, :] = A[1, :] end end - Q, X = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false) + Q, X, p = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false) @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps #@test array(Q) * array(Q)' no such relationship for tall matrices. - if positive && !rank_reveal + if positive && !rank_reveal && !pivot nr, nc = size(X) dr = qx == ql ? Base.max(0, nc - nr) : 0 diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. @test all(real(diagX) .>= 0.0) @test all(imag(diagX) .== 0.0) end + if positive && !isnothing(p) + Xp = X[:, p] #permute columns so diag gets restored to the right place. + nr, nc = size(Xp) + dr = qx == ql ? Base.max(0, nc - nr) : 0 + diagX = diag(Xp[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right. + @test all(real(diagX) .>= 0.0) + @test all(imag(diagX) .== 0.0) + end if cutoff > 0 && singular @test dim(Q, 2) == 4 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X). @test dim(X, 1) == 4 #Redundant? end + if (cutoff >= 0.0 || pivot) && qx == qr + @test !isnothing(p) + end end nothing diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index c0fd02943f..09117720e2 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -458,7 +458,7 @@ function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar # AC = permute(AC, cL, cR; allow_alias=true) - QT, XT = qx(tensor(AC); kwargs...) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented. + QT, XT, perm = qx(tensor(AC); kwargs...) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented. # # Undo the combine oepration, to recover all tensor indices. # @@ -474,7 +474,7 @@ function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar X = settags(X, tags, q) q = settags(q, tags) - return Q, X, q + return Q, X, q, perm end # @@ -482,7 +482,7 @@ end # with swapping the left and right indices. The X tensor = R or L. # function xq(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) - Q, X, q = qx(A, Rinds, Linds; kwargs...) + Q, X, q, perm = qx(A, Rinds, Linds; kwargs...) # # fix up the tag name for the index between Q and L. # @@ -490,7 +490,7 @@ function xq(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar X = settags(X, tags, q) q = settags(q, tags) - return X, Q, q + return X, Q, q, perm end polar(A::ITensor; kwargs...) = error(noinds_error_message("polar")) diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index e0471c6787..5eddbe99c3 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -87,10 +87,23 @@ function rank_fix(A::ITensor, Linds...) 
At = NDTensors.tensor(AC) nc = dim(At, 2) @assert nc >= 2 - for c in 2:nc - At[:, c] = At[:, 1] * 1.05^c + if hasqns(At) + # In this case we make each nz block have rank=1 + expected_rank = nnzblocks(At) + for b in nzblocks(At) + bv = ITensors.blockview(At, b) + nr, nc = dims(bv) + for c in 2:nc + bv[:, c] = bv[:, 1] * 1.05^c + end + end + else + for c in 2:nc + At[:, c] = At[:, 1] * 1.05^c + end + expected_rank = 1 end - return itensor(At) * dag(CL) * dag(CR) + return itensor(At) * dag(CL) * dag(CR), expected_rank end function diag_upper(l::Index, A::ITensor) @@ -356,23 +369,28 @@ end A = randomITensor(l, s, s', r) Ainds = inds(A) - Q, R, q = qr(A, Ainds[1:ninds]; positive=true) + Q, R, q, p = qr(A, Ainds[1:ninds]; positive=true) @test min(diag_upper(q, R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - Q, L, q = ql(A, Ainds[1:ninds]; positive=true) + @test isnothing(p) + Q, L, q, p = ql(A, Ainds[1:ninds]; positive=true) @test min(diag_lower(q, L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test isnothing(p) - R, Q, q = rq(A, Ainds[1:ninds]; positive=true) + R, Q, q, p = rq(A, Ainds[1:ninds]; positive=true) @test min(diag_lower(q, R)...) > 0.0 #transpose R is lower @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - L, Q, q = lq(A, Ainds[1:ninds]; positive=true) + @test isnothing(p) + + L, Q, q, p = lq(A, Ainds[1:ninds]; positive=true) @test min(diag_upper(q, L)...) > 0.0 #transpose L is upper @test A ≈ Q * L atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test isnothing(p) end @testset "QR/QL block sparse with positive R" begin @@ -380,15 +398,18 @@ end s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") r = Index(QN("Sz", 0) => 3; tags="r") A = randomITensor(l, s, dag(s'), r) - Q, R, q = qr(A, l, s, dag(s'); positive=true) + Q, R, q, p = qr(A, l, s, dag(s'); positive=true) @test min(diag(R)...) > 0.0 @test A ≈ Q * R atol = 1e-13 - Q, L, q = ql(A, l, s, dag(s'); positive=true) + @test isnothing(p) + + Q, L, q, p = ql(A, l, s, dag(s'); positive=true) @test min(diag(L)...) > 0.0 @test A ≈ Q * L atol = 1e-13 + @test isnothing(p) end - @testset "Rank revealing QR/LQ decomp on MPS dense $elt tensor" for ninds in [1, 2, 3], + @testset "Rank revealing QR/LQ decomp on MPO dense $elt tensor" for ninds in [1, 2, 3], elt in [Float64, ComplexF64] l = Index(5, "l") @@ -397,16 +418,44 @@ end A = randomITensor(elt, l, s, s', r) Ainds = inds(A) - A = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. - Q, R, q = qr(A, Ainds[1:ninds]; cutoff=1e-12) - @test dim(q) == 1 #check that we found rank==1 + A, expected_rank = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. 
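+    # rank_fix rewrites every column as a multiple of the first (block by block
+    # when QNs are present), so expected_rank is 1 for dense storage and
+    # nnzblocks(A) for block sparse storage.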
+ Q, R, q, p = qr(A, Ainds[1:ninds]; cutoff=1e-12) + @test dim(q) == expected_rank #check that we found rank==1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test !isnothing(p) - L, Q, q = lq(A, Ainds[1:ninds]; cutoff=1e-12) - @test dim(q) == 1 #check that we found rank==1 + L, Q, q, p = lq(A, Ainds[1:ninds]; cutoff=1e-12) + @test dim(q) == expected_rank #check that we found rank==1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 + @test !isnothing(p) + end + + @testset "Rank revealing QR/LQ decomp on MPO block-sparse $elt tensor" for ninds in + [1, 2, 3], + elt in [Float64] + + space = [QN("Sz", 0) => 4, QN("Sz", -2) => 4, QN("Sz", 2) => 4] + site_space = [QN("Sz", -1) => 1, QN("Sz", 1) => 1] + l = Index(space, "l") + s = Index(site_space, "s") + r = Index(space, "r") + A = randomITensor(elt, l, s, s', r) + + Ainds = inds(A) + A, expected_rank = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. + Q, R, q, p = qr(A, Ainds[1:ninds]; cutoff=1e-12) + @test dim(q) == expected_rank #check that we found teh correct rank + @test A ≈ Q * R atol = 1e-13 + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + @test !isnothing(p) + + L, Q, q, p = lq(A, Ainds[1:ninds]; cutoff=1e-12) + @test dim(q) == expected_rank #check that we found rank==1 + @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L + @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 + @test !isnothing(p) end @testset "factorize with QR" begin From be0074d244e474d6b988ff2212c5790f4c6e03d2 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Mon, 24 Apr 2023 14:36:42 -0600 Subject: [PATCH 82/90] Support atol/rtol interface for column pivot qr --- NDTensors/src/blocksparse/linearalgebra.jl | 4 +- NDTensors/src/linearalgebra/linearalgebra.jl | 45 ++++++++--- NDTensors/test/linearalgebra.jl | 14 ++-- test/base/test_decomp.jl | 85 +++++++++++++++++++- 4 files changed, 122 insertions(+), 26 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index ae7b137b5b..5ad956243f 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -304,7 +304,7 @@ qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) # This code thanks to Niklas Tausendpfund # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) +function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; block_atol=-1.0, block_rtol=-1.0, atol=-1.0, rtol=-1.0, kwargs...) ElT = eltype(T) # getting total number of blocks nnzblocksT = nnzblocks(T) @@ -316,7 +316,7 @@ function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) for (jj, b) in enumerate(eachnzblock(T)) blockT = blockview(T, b) - QXb = qx(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387 + QXb = qx(blockT; atol=block_atol, rtol=block_rtol, kwargs...) 
#call dense qr at src/linearalgebra.jl 387 if (isnothing(QXb)) return nothing diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index a355171b3c..453bc78754 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -370,15 +370,31 @@ end # Trim out n rows of R based on norm(R_nn)=0.0,rtol>=0.0 + # for r in nr:-1:1 + # Rnn=norm(R[r:nr, :]) + # R11=norm(R[1:r-1, :]) + # if (do_atol && Rnn > atol) || (do_rtol && Rnn/R11 > rtol) + # last_row_to_keep = r + # break + # end + # end + # + # Could also do the same test but only looking at the diagonals + # + dR=diag(R) for r in nr:-1:1 - if norm(R[r:nr, :]) > cutoff + Rnn=norm(dR[r:nr]) + R11=norm(dR[1:r-1]) + if (do_atol && Rnn > atol) || (do_rtol && Rnn/R11 > rtol) last_row_to_keep = r break end @@ -386,13 +402,14 @@ function trim_rows(Q::AbstractMatrix, R::AbstractMatrix, cutoff::Float64; verbos num_zero_rows = nr - last_row_to_keep if num_zero_rows == 0 + verbose && println("Rank Reveal removing $num_zero_rows rows with atol=$atol, rtol=$rtol") return Q, R end # # Useful output for trouble shooting. # if verbose - println("Rank Reveal removing $num_zero_rows rows with log10(cutoff)=$(log10(cutoff))") + println("Rank Reveal removing $num_zero_rows rows with atol=$atol, rtol=$rtol") end return Q[:, 1:last_row_to_keep], R[1:last_row_to_keep, :] @@ -409,22 +426,24 @@ function qx( T::DenseTensor{<:Any,2}; positive=false, pivot=false, - cutoff=-1.0, + atol=-1.0, #absolute tolerance for rank reduction + rtol=-1.0, #relative tolerance for rank reduction + block_rtol=-1.0, #This is supposed to be for block sparse, but we reluctantly accept it here. verbose=false, kwargs..., ) - if isnothing(cutoff) - cutoff = -1.0 + if rtol<0.0 && block_rtol>=0.0 + rtol=block_rtol end - do_rank_reduction = cutoff >= 0.0 + do_rank_reduction = (atol >= 0.0) || (rtol >= 0.0) if do_rank_reduction && qx == ql - @warn "User request ql decomposition with cutoff=$cutoff." * - " Rank reduction requires column pivoting which is not supported for ql decomposition in lapack/ITensors" + @warn "User requested rq/ql decomposition with atol=$atol, rtol=$rtol." * + " Rank reduction requires column pivoting which is not supported for rq/ql decomposition in lapack/ITensors" do_rank_reduction = false end if pivot && qx == ql - @warn "User request ql decomposition with column pivoting." * - " Column pivoting is not supported for ql decomposition in lapack/ITensors" + @warn "User requested rq/ql decomposition with column pivoting." * + " Column pivoting is not supported for rq/ql decomposition in lapack/ITensors" pivot = false end if do_rank_reduction @@ -433,7 +452,7 @@ function qx( if pivot QM, XM, p = qx(matrix(T), Val(true)) #with colun pivoting - QM, XM = trim_rows(Matrix(QM), XM, cutoff; verbose=verbose) + QM, XM = trim_rows(Matrix(QM), XM, atol, rtol; verbose=verbose) else QM, XM = qx(matrix(T), Val(false)) #no column pivoting QM = Matrix(QM) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index df9f7871ce..1c6083e31c 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -40,7 +40,7 @@ end eps = Base.eps(real(elt)) * 30 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps. - cutoff = rank_reveal ? eps * 1.0 : -1.0 + atol = rank_reveal ? 
eps * 1.0 : -1.0 n, m = 4, 8 # # Wide matrix (more columns than rows) @@ -54,7 +54,7 @@ end end end # you can set verbose=true if you want to get debug output on rank reduction. - Q, X, p = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false) #X is R or L. + Q, X, p = qx(A; positive=positive, atol=atol, pivot=pivot, verbose=false) #X is R or L. @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps if dim(Q, 1) == dim(Q, 2) @@ -76,11 +76,11 @@ end @test all(imag(diagX) .== 0.0) end - if cutoff >= 0 && singular + if atol >= 0 && singular @test dim(Q, 2) == 2 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X). @test dim(X, 1) == 2 #Redundant? end - if (cutoff >= 0.0 || pivot) && qx == qr + if (atol >= 0.0 || pivot) && qx == qr @test !isnothing(p) end # @@ -93,7 +93,7 @@ end A[i, :] = A[1, :] end end - Q, X, p = qx(A; positive=positive, cutoff=cutoff, pivot=pivot, verbose=false) + Q, X, p = qx(A; positive=positive, atol=atol, pivot=pivot, verbose=false) @test A ≈ Q * X atol = eps @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps #@test array(Q) * array(Q)' no such relationship for tall matrices. @@ -112,11 +112,11 @@ end @test all(real(diagX) .>= 0.0) @test all(imag(diagX) .== 0.0) end - if cutoff > 0 && singular + if atol > 0 && singular @test dim(Q, 2) == 4 #make sure the rank revealing mechanism hacked off the columns of Q (and rows of X). @test dim(X, 1) == 4 #Redundant? end - if (cutoff >= 0.0 || pivot) && qx == qr + if (atol >= 0.0 || pivot) && qx == qr @test !isnothing(p) end end diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 5eddbe99c3..5a0dc50509 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -409,6 +409,83 @@ end @test isnothing(p) end + @testset "Dense rank revealing QR/LQ decomp interface options" for qx in [qr,lq] + l = Index(5, "l") + s = Index(2, "s") + r = Index(5, "r") + A = randomITensor(Float64, l, s, s', r) + qrinds = inds(A)[1:2] + rinds = noncommoninds(A,qrinds) + A, expected_rank = rank_fix(A, qrinds) #make all columns linear dependent on column 1, so rank==1. 
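+    # Keyword interface exercised below: pivot=true/false toggles column
+    # pivoting; atol/rtol request rank reduction with an absolute/relative
+    # tolerance (either one implies pivoting); block_rtol is accepted as a
+    # relative-tolerance alias aimed at block sparse storage.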
+ + Q, R = qx(A,l,s) # no pivoting + Q, R, iq = qx(A,qrinds) # no pivoting + @test dim(iq) == dim(qrinds) + Q, R, iq, p = qx(A,qrinds) # no pivoting + @test isnothing(p) + @test dim(iq) == dim(qrinds) + + # Q, R, iq, p = qx(A,qrinds; pivot=Val(false)) not supported + Q, R, iq, p = qx(A,qrinds; pivot=false) # no pivoting + @test isnothing(p) + @test dim(iq) == dim(qrinds) + + Q, R, iq, p = qx(A,qrinds; pivot=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == dim(qrinds) + + if VERSION >= v"1.7" + Q, R, iq, p= qx(A,qrinds; pivot=NoPivot()) # no pivoting + Q, R, iq, p= qr(A,qrinds; pivot=ColumnNorm()) # column pivoting, no rank reduction + Q, R, iq, p= lq(A,qrinds; pivot=RowNorm()) # row pivoting, no rank reduction + end + + Q, R, iq, p = qx(A,qrinds; atol=1e-14) # absolute tolerance for rank reduction + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == expected_rank + + Q, R, iq, p = qx(A,qrinds; rtol=1e-15) # relative tolerance for rank reduction + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == expected_rank + + Q, R, iq, p = qx(A,qrinds; block_rtol=1e-15) # relative tolerance for rank reduction + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == expected_rank + + end + + @testset "Blocksparse rank revealing QR/LQ decomp interface options" begin + space = [QN("Sz", 0) => 4, QN("Sz", -2) => 4, QN("Sz", 2) => 4] + site_space = [QN("Sz", -1) => 1, QN("Sz", 1) => 1] + l = Index(space, "l") + s = Index(site_space, "s") + r = Index(space, "r") + A = randomITensor(Float64, l, s, s', r) + qrinds = inds(A)[1:2] + rinds = noncommoninds(A,qrinds) + A, expected_rank = rank_fix(A, qrinds) #make all columns linear dependent on column 1, so rank==1. + + Q, R, iq, p = qr(A,qrinds; block_atol=1e-14) # absolute tolerance for rank reduction + @test !isnothing(p) + @test length(p)>0 + @test dim(iq) == expected_rank + + Q, R, iq, p = qr(A,qrinds; block_rtol=1e-15) # relative tolerance for rank reduction + @test !isnothing(p) + @test length(p)>0 + @test dim(iq) == expected_rank + + Q, R, iq, p = qr(A,qrinds; block_rtol=1e-15,rtol=1000) # rtol ignored. + @test !isnothing(p) + @test length(p)>0 + @test dim(iq) == expected_rank + + end + @testset "Rank revealing QR/LQ decomp on MPO dense $elt tensor" for ninds in [1, 2, 3], elt in [Float64, ComplexF64] @@ -419,13 +496,13 @@ end Ainds = inds(A) A, expected_rank = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. - Q, R, q, p = qr(A, Ainds[1:ninds]; cutoff=1e-12) + Q, R, q, p = qr(A, Ainds[1:ninds]; atol=1e-12) @test dim(q) == expected_rank #check that we found rank==1 @test A ≈ Q * R atol = 1e-13 @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @test !isnothing(p) - L, Q, q, p = lq(A, Ainds[1:ninds]; cutoff=1e-12) + L, Q, q, p = lq(A, Ainds[1:ninds]; atol=1e-12) @test dim(q) == expected_rank #check that we found rank==1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 @@ -445,13 +522,13 @@ end Ainds = inds(A) A, expected_rank = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. 
- Q, R, q, p = qr(A, Ainds[1:ninds]; cutoff=1e-12) + Q, R, q, p = qr(A, Ainds[1:ninds]; block_atol=1e-12) @test dim(q) == expected_rank #check that we found teh correct rank @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 @test !isnothing(p) - L, Q, q, p = lq(A, Ainds[1:ninds]; cutoff=1e-12) + L, Q, q, p = lq(A, Ainds[1:ninds]; block_atol=1e-12) @test dim(q) == expected_rank #check that we found rank==1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 From a86f4cfbe5a6ecb6ce8972e6f6944b53e039d745 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Mon, 24 Apr 2023 16:00:05 -0600 Subject: [PATCH 83/90] Support the new NoPivot, ColumnNorm types Also add RowNorm for lq --- NDTensors/src/exports.jl | 4 +++- NDTensors/src/linearalgebra/linearalgebra.jl | 12 ++++++++++++ src/exports.jl | 1 + 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index 427f220e10..d467c66588 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -79,4 +79,6 @@ export store, # linearalgebra.jl - qr + qr, + RowNorm + diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 453bc78754..111853ea35 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -415,8 +415,18 @@ function trim_rows(Q::AbstractMatrix, R::AbstractMatrix, atol::Float64, rtol::Fl return Q[:, 1:last_row_to_keep], R[1:last_row_to_keep, :] end +struct RowNorm end #for row pivoting lq + qr(T::DenseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) ql(T::DenseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...) + +translate_pivot(pivot::Bool)::Bool=pivot +if VERSION >= v"1.7" +translate_pivot(pivot::NoPivot)::Bool=false +translate_pivot(pivot::ColumnNorm)::Bool=true +translate_pivot(pivot::RowNorm)::Bool=true +end + # # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. @@ -432,6 +442,8 @@ function qx( verbose=false, kwargs..., ) + pivot=translate_pivot(pivot) + if rtol<0.0 && block_rtol>=0.0 rtol=block_rtol end diff --git a/src/exports.jl b/src/exports.jl index b5cfea5fa3..8b18e616ef 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -12,6 +12,7 @@ export # Types Block, Spectrum, + RowNorm, # Methods eigs, entropy, From cde6dad5edf3c26c3f51b51d821d3ffe27406dd9 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 25 Apr 2023 09:36:23 -0600 Subject: [PATCH 84/90] Support Matts proposed interface RowNorm() for lq atol/rtol instead of cutoff block_rtol for future block sparse relative cutoff support --- NDTensors/src/blocksparse/linearalgebra.jl | 4 +- NDTensors/src/exports.jl | 8 ++- NDTensors/src/linearalgebra/linearalgebra.jl | 13 +++-- src/exports.jl | 6 +- src/tensor_operations/matrix_decomposition.jl | 18 ++++-- test/base/test_decomp.jl | 58 +++++++++++++++---- 6 files changed, 79 insertions(+), 28 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 5ad956243f..a20bc45e61 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -304,7 +304,7 @@ qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) 
# This code thanks to Niklas Tausendpfund # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; block_atol=-1.0, block_rtol=-1.0, atol=-1.0, rtol=-1.0, kwargs...) +function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; block_rtol=-1.0, kwargs...) ElT = eltype(T) # getting total number of blocks nnzblocksT = nnzblocks(T) @@ -316,7 +316,7 @@ function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; block_atol=-1.0, block_ for (jj, b) in enumerate(eachnzblock(T)) blockT = blockview(T, b) - QXb = qx(blockT; atol=block_atol, rtol=block_rtol, kwargs...) #call dense qr at src/linearalgebra.jl 387 + QXb = qx(blockT; rtol=block_rtol, kwargs...) #call dense qr at src/linearalgebra.jl 387 if (isnothing(QXb)) return nothing diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index d467c66588..793db6cc3c 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -79,6 +79,8 @@ export store, # linearalgebra.jl - qr, - RowNorm - + qr + + if VERSION >= v"1.7" + export RowNorm + end diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 111853ea35..5c10ed1752 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -415,16 +415,18 @@ function trim_rows(Q::AbstractMatrix, R::AbstractMatrix, atol::Float64, rtol::Fl return Q[:, 1:last_row_to_keep], R[1:last_row_to_keep, :] end -struct RowNorm end #for row pivoting lq +if VERSION >= v"1.7" + struct RowNorm end #for row pivoting lq +end qr(T::DenseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) ql(T::DenseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...) translate_pivot(pivot::Bool)::Bool=pivot -if VERSION >= v"1.7" -translate_pivot(pivot::NoPivot)::Bool=false -translate_pivot(pivot::ColumnNorm)::Bool=true -translate_pivot(pivot::RowNorm)::Bool=true +if VERSION >= v"1.7" + translate_pivot(pivot::NoPivot)::Bool=false + translate_pivot(pivot::ColumnNorm)::Bool=true + translate_pivot(pivot::RowNorm)::Bool=true end # @@ -442,6 +444,7 @@ function qx( verbose=false, kwargs..., ) + pivot=translate_pivot(pivot) if rtol<0.0 && block_rtol>=0.0 diff --git a/src/exports.jl b/src/exports.jl index 8b18e616ef..c08ab6e399 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -12,7 +12,6 @@ export # Types Block, Spectrum, - RowNorm, # Methods eigs, entropy, @@ -394,3 +393,8 @@ export hasqns, nblocks, qn + + if VERSION >= v"1.7" + export RowNorm + end + diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 09117720e2..bf9fdeff2b 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -422,8 +422,11 @@ lq(A::ITensor, Linds...; kwargs...) = lq(A, Linds, uniqueinds(A, Linds); kwargs. # Core function where both left and right indices are supplied as tuples or vectors # Handle default tags and dispatch to generic qx/xq functions. # -function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", kwargs...) - return qx(qr, A, Linds, Rinds; tags, kwargs...) +function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", pivot=false, kwargs...) + if VERSION >= v"1.7" && typeof(pivot)==RowNorm + @warn "Please use ColumnNorm() instead of RowNorm() for pivoted qr decomposition." + end + return qx(qr, A, Linds, Rinds; tags, pivot=pivot, kwargs...) 
end function ql(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,ql", kwargs...) return qx(ql, A, Linds, Rinds; tags, kwargs...) @@ -431,8 +434,11 @@ end function rq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,rq", kwargs...) return xq(ql, A, Linds, Rinds; tags, kwargs...) end -function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", kwargs...) - return xq(qr, A, Linds, Rinds; tags, kwargs...) +function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", pivot=false, kwargs...) + if VERSION >= v"1.7" && typeof(pivot)==ColumnNorm + @warn "Please use RowNorm() instead of ColumnNorm() for pivoted lq decomposition." + end + return xq(qr, A, Linds, Rinds; tags, pivot=pivot, kwargs...) end # # Generic function implementing both qr and ql decomposition. The X tensor = R or L. @@ -481,8 +487,8 @@ end # Generic function implementing both rq and lq decomposition. Implemented using qr/ql # with swapping the left and right indices. The X tensor = R or L. # -function xq(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) - Q, X, q, perm = qx(A, Rinds, Linds; kwargs...) +function xq(qxf::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) + Q, X, q, perm = qx(qxf,A, Rinds, Linds; tags=tags, kwargs...) # # fix up the tag name for the index between Q and L. # diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index 5a0dc50509..b7cb512e93 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -434,13 +434,7 @@ end @test !isnothing(p) @test length(p)==dim(rinds) @test dim(iq) == dim(qrinds) - - if VERSION >= v"1.7" - Q, R, iq, p= qx(A,qrinds; pivot=NoPivot()) # no pivoting - Q, R, iq, p= qr(A,qrinds; pivot=ColumnNorm()) # column pivoting, no rank reduction - Q, R, iq, p= lq(A,qrinds; pivot=RowNorm()) # row pivoting, no rank reduction - end - + Q, R, iq, p = qx(A,qrinds; atol=1e-14) # absolute tolerance for rank reduction @test !isnothing(p) @test length(p)==dim(rinds) @@ -458,6 +452,48 @@ end end + if VERSION >= v"1.7" + @testset "Dense rank revealing QR/LQ decomp interface options, julia VERSION>=1.7" begin + l = Index(5, "l") + s = Index(2, "s") + r = Index(5, "r") + A = randomITensor(Float64, l, s, s', r) + qrinds = inds(A)[1:2] + rinds = noncommoninds(A,qrinds) + A, expected_rank = rank_fix(A, qrinds) #make all columns linear dependent on column 1, so rank==1. 
+ + + Q, R, iq, p= qr(A,qrinds; pivot=NoPivot()) # no pivoting + @test isnothing(p) + @test dim(iq) == dim(qrinds) + + L, Q, iq, p= lq(A,qrinds; pivot=NoPivot()) # no pivoting + @test isnothing(p) + @test dim(iq) == dim(qrinds) + + Q, R, iq, p= qr(A,qrinds; pivot=ColumnNorm()) # column pivoting, no rank reduction + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == dim(qrinds) + + L, Q, iq, p= lq(A,qrinds; pivot=RowNorm()) # row pivoting, no rank reduction + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == dim(qrinds) + + @test_logs (:warn,"Please use ColumnNorm() instead of RowNorm() for pivoted qr decomposition.") Q, R, iq, p= qr(A,qrinds; pivot=RowNorm()) # column pivoting, no rank reduction + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == dim(qrinds) + + @test_logs (:warn,"Please use RowNorm() instead of ColumnNorm() for pivoted lq decomposition.") L, Q, iq, p= lq(A,qrinds; pivot=ColumnNorm()) # row pivoting, no rank reduction + @test !isnothing(p) + @test length(p)==dim(rinds) + @test dim(iq) == dim(qrinds) + + end + end + @testset "Blocksparse rank revealing QR/LQ decomp interface options" begin space = [QN("Sz", 0) => 4, QN("Sz", -2) => 4, QN("Sz", 2) => 4] site_space = [QN("Sz", -1) => 1, QN("Sz", 1) => 1] @@ -469,7 +505,7 @@ end rinds = noncommoninds(A,qrinds) A, expected_rank = rank_fix(A, qrinds) #make all columns linear dependent on column 1, so rank==1. - Q, R, iq, p = qr(A,qrinds; block_atol=1e-14) # absolute tolerance for rank reduction + Q, R, iq, p = qr(A,qrinds; atol=1e-14) # absolute tolerance for rank reduction @test !isnothing(p) @test length(p)>0 @test dim(iq) == expected_rank @@ -479,7 +515,7 @@ end @test length(p)>0 @test dim(iq) == expected_rank - Q, R, iq, p = qr(A,qrinds; block_rtol=1e-15,rtol=1000) # rtol ignored. + Q, R, iq, p = qr(A,qrinds; block_rtol=1e-15,rtol=1000.0) # rtol ignored. @test !isnothing(p) @test length(p)>0 @test dim(iq) == expected_rank @@ -522,13 +558,13 @@ end Ainds = inds(A) A, expected_rank = rank_fix(A, Ainds[1:ninds]) #make all columns linear dependent on column 1, so rank==1. 
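     # For block sparse storage the tolerance is applied block by block, so the
     # expected rank is the sum of the per-block ranks (one per nonzero block
     # after rank_fix).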
- Q, R, q, p = qr(A, Ainds[1:ninds]; block_atol=1e-12) + Q, R, q, p = qr(A, Ainds[1:ninds]; block_rtol=1e-12) @test dim(q) == expected_rank #check that we found teh correct rank @test A ≈ Q * R atol = 1e-13 @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 @test !isnothing(p) - L, Q, q, p = lq(A, Ainds[1:ninds]; block_atol=1e-12) + L, Q, q, p = lq(A, Ainds[1:ninds]; block_rtol=1e-12) @test dim(q) == expected_rank #check that we found rank==1 @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 From c790493296ec347718bbb8d971ef6226f834ccc7 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Tue, 25 Apr 2023 17:03:59 -0600 Subject: [PATCH 85/90] Support ql decomp using the MatrixFactorizations package --- NDTensors/Project.toml | 2 + NDTensors/src/NDTensors.jl | 1 + NDTensors/src/exports.jl | 6 +- NDTensors/src/imports.jl | 2 + NDTensors/src/linearalgebra/linearalgebra.jl | 89 ++++++------------- src/exports.jl | 7 +- src/tensor_operations/matrix_decomposition.jl | 14 +-- test/base/test_decomp.jl | 76 ++++++++-------- 8 files changed, 85 insertions(+), 112 deletions(-) diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 02fc29b4fa..ea82f82989 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -12,6 +12,7 @@ Folds = "41a02a25-b8f0-4f67-bc48-60067656b558" Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196" HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +MatrixFactorizations = "a3b82374-2e81-5b9e-98ce-41277c0e4c87" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Requires = "ae029012-a4dd-5104-9daa-d747884805df" SimpleTraits = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" @@ -29,6 +30,7 @@ FLoops = "0.2.1" Folds = "0.2.8" Functors = "0.2, 0.3, 0.4" HDF5 = "0.14, 0.15, 0.16" +MatrixFactorizations = "0.9.6" Requires = "1.1" SimpleTraits = "0.9.4" SplitApplyCombine = "1.2.2" diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 96f72ef08b..1ec4b84f72 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -8,6 +8,7 @@ using FLoops using Folds using Random using LinearAlgebra +using MatrixFactorizations using StaticArrays using Functors using HDF5 diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index 793db6cc3c..853da4a7d0 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -81,6 +81,6 @@ export # linearalgebra.jl qr - if VERSION >= v"1.7" - export RowNorm - end +if VERSION >= v"1.7" + export RowNorm +end diff --git a/NDTensors/src/imports.jl b/NDTensors/src/imports.jl index 13b466fca4..c32dcba281 100644 --- a/NDTensors/src/imports.jl +++ b/NDTensors/src/imports.jl @@ -57,4 +57,6 @@ import Adapt: adapt_structure, adapt_storage import LinearAlgebra: diag, exp, norm, qr +import MatrixFactorizations: ql, rq + import TupleTools: isperm diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 5c10ed1752..ee482d93dc 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -370,7 +370,9 @@ end # Trim out n rows of R based on norm(R_nn)=0.0,rtol>=0.0 + do_atol, do_rtol = atol >= 0.0, rtol >= 0.0 # for r in nr:-1:1 # Rnn=norm(R[r:nr, :]) # R11=norm(R[1:r-1, :]) @@ -390,11 +392,11 @@ function trim_rows(Q::AbstractMatrix, R::AbstractMatrix, atol::Float64, rtol::Fl # # Could also do the same test but only looking at the diagonals # - dR=diag(R) + dR 
= diag(R) for r in nr:-1:1 - Rnn=norm(dR[r:nr]) - R11=norm(dR[1:r-1]) - if (do_atol && Rnn > atol) || (do_rtol && Rnn/R11 > rtol) + Rnn = norm(dR[r:nr]) + R11 = norm(dR[1:(r - 1)]) + if (do_atol && Rnn > atol) || (do_rtol && Rnn / R11 > rtol) last_row_to_keep = r break end @@ -402,7 +404,8 @@ function trim_rows(Q::AbstractMatrix, R::AbstractMatrix, atol::Float64, rtol::Fl num_zero_rows = nr - last_row_to_keep if num_zero_rows == 0 - verbose && println("Rank Reveal removing $num_zero_rows rows with atol=$atol, rtol=$rtol") + verbose && + println("Rank Reveal removing $num_zero_rows rows with atol=$atol, rtol=$rtol") return Q, R end # @@ -422,13 +425,22 @@ end qr(T::DenseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) ql(T::DenseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...) -translate_pivot(pivot::Bool)::Bool=pivot +translate_pivot(pivot::Bool)::Bool = pivot if VERSION >= v"1.7" - translate_pivot(pivot::NoPivot)::Bool=false - translate_pivot(pivot::ColumnNorm)::Bool=true - translate_pivot(pivot::RowNorm)::Bool=true + translate_pivot(pivot::NoPivot)::Bool = false + translate_pivot(pivot::ColumnNorm)::Bool = true + translate_pivot(pivot::RowNorm)::Bool = true end +matrix(Q::LinearAlgebra.QRCompactWYQ) = Matrix(Q) +function matrix(Q::MatrixFactorizations.QLPackedQ) + n, m = size(Q.factors) + if n <= m + return Matrix(Q) + else + return Q * Matrix(LinearAlgebra.I, m, m) + end +end # # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. @@ -444,11 +456,10 @@ function qx( verbose=false, kwargs..., ) + pivot = translate_pivot(pivot) - pivot=translate_pivot(pivot) - - if rtol<0.0 && block_rtol>=0.0 - rtol=block_rtol + if rtol < 0.0 && block_rtol >= 0.0 + rtol = block_rtol end do_rank_reduction = (atol >= 0.0) || (rtol >= 0.0) if do_rank_reduction && qx == ql @@ -470,7 +481,7 @@ function qx( QM, XM = trim_rows(Matrix(QM), XM, atol, rtol; verbose=verbose) else QM, XM = qx(matrix(T), Val(false)) #no column pivoting - QM = Matrix(QM) + QM = matrix(QM) p = nothing end # @@ -539,52 +550,6 @@ function qx_positive!(qx::Function, Q::AbstractMatrix, X::AbstractMatrix) end end -# -# Lapack replaces A with Q & L carefully packed together. So here we just copy a -# before letting lapack overwirte it. -# -function ql(A::AbstractMatrix, pivot; kwargs...) - @assert pivot == Val(false) - Base.require_one_based_indexing(A) - T = eltype(A) - AA = similar(A, LinearAlgebra._qreltype(T), size(A)) - copyto!(AA, A) - return ql!(AA; kwargs...) -end -# -# This is where the low level call to lapack actually occurs. Most of the work is -# about unpacking Q and L from the A matrix. -# -function ql!(A::StridedMatrix{<:LAPACK.BlasFloat}) - tau = Base.similar(A, min(size(A)...)) - x = LAPACK.geqlf!(A, tau) - #save L from the lower portion of A, before orgql! mangles it! - nr, nc = size(A) - mn = min(nr, nc) - L = similar(A, (mn, nc)) - for r in 1:mn - for c in 1:(r + nc - mn) - L[r, c] = A[r + nr - mn, c] - end - for c in (r + 1 + nc - mn):nc - L[r, c] = 0.0 - end - end - # Now we need shift the orth vectors from the right side of Q over the left side, before - if (mn < nc) - for r in 1:nr - for c in 1:mn - A[r, c] = A[r, c + nc - mn] - end - end - for r in 1:nr - A = A[:, 1:mn] #whack the extra columns in A. 
- end - end - LAPACK.orgql!(A, tau) - return A, L -end - # TODO: support alg keyword argument to choose the svd algorithm function polar(T::DenseTensor{ElT,2,IndsT}) where {ElT,IndsT} QM, RM = polar(matrix(T)) diff --git a/src/exports.jl b/src/exports.jl index c08ab6e399..6aebdf67a5 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -394,7 +394,6 @@ export nblocks, qn - if VERSION >= v"1.7" - export RowNorm - end - +if VERSION >= v"1.7" + export RowNorm +end diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index bf9fdeff2b..0d1c61a4e4 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -422,8 +422,10 @@ lq(A::ITensor, Linds...; kwargs...) = lq(A, Linds, uniqueinds(A, Linds); kwargs. # Core function where both left and right indices are supplied as tuples or vectors # Handle default tags and dispatch to generic qx/xq functions. # -function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", pivot=false, kwargs...) - if VERSION >= v"1.7" && typeof(pivot)==RowNorm +function qr( + A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", pivot=false, kwargs... +) + if VERSION >= v"1.7" && typeof(pivot) == RowNorm @warn "Please use ColumnNorm() instead of RowNorm() for pivoted qr decomposition." end return qx(qr, A, Linds, Rinds; tags, pivot=pivot, kwargs...) @@ -434,8 +436,10 @@ end function rq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,rq", kwargs...) return xq(ql, A, Linds, Rinds; tags, kwargs...) end -function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", pivot=false, kwargs...) - if VERSION >= v"1.7" && typeof(pivot)==ColumnNorm +function lq( + A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", pivot=false, kwargs... +) + if VERSION >= v"1.7" && typeof(pivot) == ColumnNorm @warn "Please use RowNorm() instead of ColumnNorm() for pivoted lq decomposition." end return xq(qr, A, Linds, Rinds; tags, pivot=pivot, kwargs...) @@ -488,7 +492,7 @@ end # with swapping the left and right indices. The X tensor = R or L. # function xq(qxf::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...) - Q, X, q, perm = qx(qxf,A, Rinds, Linds; tags=tags, kwargs...) + Q, X, q, perm = qx(qxf, A, Rinds, Linds; tags=tags, kwargs...) # # fix up the tag name for the index between Q and L. # diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl index b7cb512e93..7df99c184a 100644 --- a/test/base/test_decomp.jl +++ b/test/base/test_decomp.jl @@ -409,47 +409,46 @@ end @test isnothing(p) end - @testset "Dense rank revealing QR/LQ decomp interface options" for qx in [qr,lq] + @testset "Dense rank revealing QR/LQ decomp interface options" for qx in [qr, lq] l = Index(5, "l") s = Index(2, "s") r = Index(5, "r") A = randomITensor(Float64, l, s, s', r) qrinds = inds(A)[1:2] - rinds = noncommoninds(A,qrinds) + rinds = noncommoninds(A, qrinds) A, expected_rank = rank_fix(A, qrinds) #make all columns linear dependent on column 1, so rank==1. 
- Q, R = qx(A,l,s) # no pivoting - Q, R, iq = qx(A,qrinds) # no pivoting + Q, R = qx(A, l, s) # no pivoting + Q, R, iq = qx(A, qrinds) # no pivoting @test dim(iq) == dim(qrinds) - Q, R, iq, p = qx(A,qrinds) # no pivoting + Q, R, iq, p = qx(A, qrinds) # no pivoting @test isnothing(p) @test dim(iq) == dim(qrinds) # Q, R, iq, p = qx(A,qrinds; pivot=Val(false)) not supported - Q, R, iq, p = qx(A,qrinds; pivot=false) # no pivoting + Q, R, iq, p = qx(A, qrinds; pivot=false) # no pivoting @test isnothing(p) @test dim(iq) == dim(qrinds) - Q, R, iq, p = qx(A,qrinds; pivot=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally + Q, R, iq, p = qx(A, qrinds; pivot=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == dim(qrinds) - - Q, R, iq, p = qx(A,qrinds; atol=1e-14) # absolute tolerance for rank reduction + + Q, R, iq, p = qx(A, qrinds; atol=1e-14) # absolute tolerance for rank reduction @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == expected_rank - Q, R, iq, p = qx(A,qrinds; rtol=1e-15) # relative tolerance for rank reduction + Q, R, iq, p = qx(A, qrinds; rtol=1e-15) # relative tolerance for rank reduction @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == expected_rank - Q, R, iq, p = qx(A,qrinds; block_rtol=1e-15) # relative tolerance for rank reduction + Q, R, iq, p = qx(A, qrinds; block_rtol=1e-15) # relative tolerance for rank reduction @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == expected_rank - end if VERSION >= v"1.7" @@ -459,38 +458,40 @@ end r = Index(5, "r") A = randomITensor(Float64, l, s, s', r) qrinds = inds(A)[1:2] - rinds = noncommoninds(A,qrinds) + rinds = noncommoninds(A, qrinds) A, expected_rank = rank_fix(A, qrinds) #make all columns linear dependent on column 1, so rank==1. - - Q, R, iq, p= qr(A,qrinds; pivot=NoPivot()) # no pivoting + Q, R, iq, p = qr(A, qrinds; pivot=NoPivot()) # no pivoting @test isnothing(p) @test dim(iq) == dim(qrinds) - L, Q, iq, p= lq(A,qrinds; pivot=NoPivot()) # no pivoting + L, Q, iq, p = lq(A, qrinds; pivot=NoPivot()) # no pivoting @test isnothing(p) @test dim(iq) == dim(qrinds) - Q, R, iq, p= qr(A,qrinds; pivot=ColumnNorm()) # column pivoting, no rank reduction + Q, R, iq, p = qr(A, qrinds; pivot=ColumnNorm()) # column pivoting, no rank reduction @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == dim(qrinds) - L, Q, iq, p= lq(A,qrinds; pivot=RowNorm()) # row pivoting, no rank reduction + L, Q, iq, p = lq(A, qrinds; pivot=RowNorm()) # row pivoting, no rank reduction @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == dim(qrinds) - @test_logs (:warn,"Please use ColumnNorm() instead of RowNorm() for pivoted qr decomposition.") Q, R, iq, p= qr(A,qrinds; pivot=RowNorm()) # column pivoting, no rank reduction + @test_logs ( + :warn, "Please use ColumnNorm() instead of RowNorm() for pivoted qr decomposition." 
+ ) Q, R, iq, p = qr(A, qrinds; pivot=RowNorm()) # column pivoting, no rank reduction @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == dim(qrinds) - @test_logs (:warn,"Please use RowNorm() instead of ColumnNorm() for pivoted lq decomposition.") L, Q, iq, p= lq(A,qrinds; pivot=ColumnNorm()) # row pivoting, no rank reduction + @test_logs ( + :warn, "Please use RowNorm() instead of ColumnNorm() for pivoted lq decomposition." + ) L, Q, iq, p = lq(A, qrinds; pivot=ColumnNorm()) # row pivoting, no rank reduction @test !isnothing(p) - @test length(p)==dim(rinds) + @test length(p) == dim(rinds) @test dim(iq) == dim(qrinds) - end end @@ -502,24 +503,23 @@ end r = Index(space, "r") A = randomITensor(Float64, l, s, s', r) qrinds = inds(A)[1:2] - rinds = noncommoninds(A,qrinds) + rinds = noncommoninds(A, qrinds) A, expected_rank = rank_fix(A, qrinds) #make all columns linear dependent on column 1, so rank==1. - - Q, R, iq, p = qr(A,qrinds; atol=1e-14) # absolute tolerance for rank reduction + + Q, R, iq, p = qr(A, qrinds; atol=1e-14) # absolute tolerance for rank reduction @test !isnothing(p) - @test length(p)>0 + @test length(p) > 0 @test dim(iq) == expected_rank - Q, R, iq, p = qr(A,qrinds; block_rtol=1e-15) # relative tolerance for rank reduction + Q, R, iq, p = qr(A, qrinds; block_rtol=1e-15) # relative tolerance for rank reduction @test !isnothing(p) - @test length(p)>0 + @test length(p) > 0 @test dim(iq) == expected_rank - Q, R, iq, p = qr(A,qrinds; block_rtol=1e-15,rtol=1000.0) # rtol ignored. + Q, R, iq, p = qr(A, qrinds; block_rtol=1e-15, rtol=1000.0) # rtol ignored. @test !isnothing(p) - @test length(p)>0 + @test length(p) > 0 @test dim(iq) == expected_rank - end @testset "Rank revealing QR/LQ decomp on MPO dense $elt tensor" for ninds in [1, 2, 3], From 9fa3dd370540214cf67f5242efd6396dc202b09f Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 26 Apr 2023 10:05:16 -0600 Subject: [PATCH 86/90] Try to avoid warnings in julia 1.8.5 --- NDTensors/src/linearalgebra/linearalgebra.jl | 50 +++++++++++++------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index ee482d93dc..e0e93c6cd9 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -425,11 +425,26 @@ end qr(T::DenseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) ql(T::DenseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...) -translate_pivot(pivot::Bool)::Bool = pivot +# LinearAlgebra qr takes types like Val(true) of ColumnNorm for control of pivoting. +# We some helper functions to deal with all these types changing between julia versions. +# +pivot_to_Bool(pivot::Bool)::Bool = pivot +pivot_to_Bool(::Val{false})::Bool = false +pivot_to_Bool(::Val{true})::Bool = true +if VERSION < v"1.7" + call_pivot(bpivot::Bool,::Function)=Val(bpivot) +end if VERSION >= v"1.7" - translate_pivot(pivot::NoPivot)::Bool = false - translate_pivot(pivot::ColumnNorm)::Bool = true - translate_pivot(pivot::RowNorm)::Bool = true + pivot_to_Bool(pivot::NoPivot)::Bool = false + pivot_to_Bool(pivot::ColumnNorm)::Bool = true + pivot_to_Bool(pivot::RowNorm)::Bool = true + function call_pivot(bpivot::Bool,qx::Function) + if qx==qr + return bpivot ? 
ColumnNorm() : NoPivot() # LinearAlgebra + else + return Val(bpivot) # MatrixFactorizations + end + end end matrix(Q::LinearAlgebra.QRCompactWYQ) = Matrix(Q) @@ -449,14 +464,14 @@ function qx( qx::Function, T::DenseTensor{<:Any,2}; positive=false, - pivot=false, + pivot=call_pivot(false,qx), atol=-1.0, #absolute tolerance for rank reduction rtol=-1.0, #relative tolerance for rank reduction block_rtol=-1.0, #This is supposed to be for block sparse, but we reluctantly accept it here. verbose=false, kwargs..., ) - pivot = translate_pivot(pivot) + bpivot = pivot_to_Bool(pivot) #We need a simple bool for making decisions below. if rtol < 0.0 && block_rtol >= 0.0 rtol = block_rtol @@ -464,23 +479,24 @@ function qx( do_rank_reduction = (atol >= 0.0) || (rtol >= 0.0) if do_rank_reduction && qx == ql @warn "User requested rq/ql decomposition with atol=$atol, rtol=$rtol." * - " Rank reduction requires column pivoting which is not supported for rq/ql decomposition in lapack/ITensors" + " Rank reduction requires column/row pivoting which is not supported for rq/ql decomposition in lapack/ITensors" do_rank_reduction = false end - if pivot && qx == ql - @warn "User requested rq/ql decomposition with column pivoting." * - " Column pivoting is not supported for rq/ql decomposition in lapack/ITensors" - pivot = false + if bpivot && qx == ql + @warn "User requested rq/ql decomposition with row/column pivoting." * + " Pivoting is not supported for rq/ql decomposition in lapack/ITensors" + bpivot = false end - if do_rank_reduction - pivot = true + if do_rank_reduction #if do_rank_reduction==false then don't change bpivot. + bpivot = true end - if pivot - QM, XM, p = qx(matrix(T), Val(true)) #with colun pivoting + pivot = call_pivot(bpivot,qx) #Convert the bool to whatever type the qx function expects. + if bpivot + QM, XM, p = qx(matrix(T), pivot) #with colun pivoting QM, XM = trim_rows(Matrix(QM), XM, atol, rtol; verbose=verbose) else - QM, XM = qx(matrix(T), Val(false)) #no column pivoting + QM, XM = qx(matrix(T), pivot) #no column pivoting QM = matrix(QM) p = nothing end @@ -491,7 +507,7 @@ function qx( # # undo the permutation on R, so the T=Q*R again. # - pivot && (XM = XM[:, invperm(p)]) + bpivot && (XM = XM[:, invperm(p)]) # # Make the new indices to go onto Q and X # From afe67fd4bd0d086c19506dc74308c8b7ec9c7cf0 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 26 Apr 2023 10:08:44 -0600 Subject: [PATCH 87/90] Format --- NDTensors/src/linearalgebra/linearalgebra.jl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index e0e93c6cd9..33d62c5683 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -432,14 +432,14 @@ pivot_to_Bool(pivot::Bool)::Bool = pivot pivot_to_Bool(::Val{false})::Bool = false pivot_to_Bool(::Val{true})::Bool = true if VERSION < v"1.7" - call_pivot(bpivot::Bool,::Function)=Val(bpivot) + call_pivot(bpivot::Bool, ::Function) = Val(bpivot) end if VERSION >= v"1.7" pivot_to_Bool(pivot::NoPivot)::Bool = false pivot_to_Bool(pivot::ColumnNorm)::Bool = true pivot_to_Bool(pivot::RowNorm)::Bool = true - function call_pivot(bpivot::Bool,qx::Function) - if qx==qr + function call_pivot(bpivot::Bool, qx::Function) + if qx == qr return bpivot ? 
ColumnNorm() : NoPivot() # LinearAlgebra else return Val(bpivot) # MatrixFactorizations @@ -464,7 +464,7 @@ function qx( qx::Function, T::DenseTensor{<:Any,2}; positive=false, - pivot=call_pivot(false,qx), + pivot=call_pivot(false, qx), atol=-1.0, #absolute tolerance for rank reduction rtol=-1.0, #relative tolerance for rank reduction block_rtol=-1.0, #This is supposed to be for block sparse, but we reluctantly accept it here. @@ -491,7 +491,7 @@ function qx( bpivot = true end - pivot = call_pivot(bpivot,qx) #Convert the bool to whatever type the qx function expects. + pivot = call_pivot(bpivot, qx) #Convert the bool to whatever type the qx function expects. if bpivot QM, XM, p = qx(matrix(T), pivot) #with colun pivoting QM, XM = trim_rows(Matrix(QM), XM, atol, rtol; verbose=verbose) From 99abd4d4367ba4e866c9280c279a437da789bc1f Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 26 Apr 2023 11:39:23 -0600 Subject: [PATCH 88/90] Try and avoid breaking ITensorGPU --- src/tensor_operations/matrix_decomposition.jl | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 0d1c61a4e4..f87b43515f 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -468,7 +468,14 @@ function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar # AC = permute(AC, cL, cR; allow_alias=true) - QT, XT, perm = qx(tensor(AC); kwargs...) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented. + QXp = qx(tensor(AC); kwargs...) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented. + if length(QXp) == 3 + QT, XT, perm = QXp + else + QT, XT = Qxp #ITensorGPU does not return a perm yet. + perm = nothing + end + # # Undo the combine oepration, to recover all tensor indices. # From 6091157b290c0c922bc3f0efcfbbecf5c9ef29bb Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 26 Apr 2023 11:59:43 -0600 Subject: [PATCH 89/90] Try and avoid breaking ITensorGPU --- src/tensor_operations/matrix_decomposition.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index f87b43515f..05e82e5add 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -472,7 +472,7 @@ function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar if length(QXp) == 3 QT, XT, perm = QXp else - QT, XT = Qxp #ITensorGPU does not return a perm yet. + QT, XT = QXp #ITensorGPU does not return a perm yet. perm = nothing end From 6b747dd6622643a95283668af02ff4a0d51d3d42 Mon Sep 17 00:00:00 2001 From: Jan Reimers Date: Wed, 24 May 2023 13:28:16 -0600 Subject: [PATCH 90/90] Switch from returning permutations to returning Rp. --- NDTensors/src/blocksparse/linearalgebra.jl | 30 ++-- NDTensors/src/linearalgebra/linearalgebra.jl | 21 ++- NDTensors/test/linearalgebra.jl | 28 ++-- src/tensor_operations/matrix_decomposition.jl | 30 ++-- test/base/test_decomp.jl | 153 ++++++++++++------ 5 files changed, 174 insertions(+), 88 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index a20bc45e61..6f96d6a34c 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -304,7 +304,9 @@ qr(T::BlockSparseTensor{<:Any,2}; kwargs...) 
= qx(qr, T; kwargs...) # This code thanks to Niklas Tausendpfund # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; block_rtol=-1.0, kwargs...) +function qx( + qx::Function, T::BlockSparseTensor{<:Any,2}; block_rtol=-1.0, return_Rp=false, kwargs... +) ElT = eltype(T) # getting total number of blocks nnzblocksT = nnzblocks(T) @@ -312,26 +314,24 @@ function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; block_rtol=-1.0, kwargs Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) Xs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) - perms = Vector{Vector{Int64}}(undef, 0) + if return_Rp + Xps = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) + end for (jj, b) in enumerate(eachnzblock(T)) blockT = blockview(T, b) - QXb = qx(blockT; rtol=block_rtol, kwargs...) #call dense qr at src/linearalgebra.jl 387 + QXb = qx(blockT; rtol=block_rtol, return_Rp, kwargs...) #call dense qr at src/linearalgebra.jl 387 if (isnothing(QXb)) return nothing end - Q, X, perm = QXb - Qs[jj] = Q - Xs[jj] = X - !isnothing(perm) && push!(perms, perm) #save permutation vector for each block. - end - - if length(perms) == 0 - perms = nothing + Qs[jj] = QXb[1] + Xs[jj] = QXb[2] + if return_Rp + Xps[jj] = QXb[3] + end end - # # Make the new index connecting Q and R # @@ -358,13 +358,17 @@ function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; block_rtol=-1.0, kwargs Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) X = BlockSparseTensor(ElT, undef, nzblocksX, indsX) + Xp = return_Rp ? BlockSparseTensor(ElT, undef, nzblocksX, indsX) : nothing for n in 1:nnzblocksT blockview(Q, nzblocksQ[n]) .= Qs[n] blockview(X, nzblocksX[n]) .= Xs[n] + if return_Rp + blockview(Xp, nzblocksX[n]) .= Xps[n] + end end - return Q, X, perms + return Q, X, Xp end function exp( diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 33d62c5683..22e324f695 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -468,6 +468,7 @@ function qx( atol=-1.0, #absolute tolerance for rank reduction rtol=-1.0, #relative tolerance for rank reduction block_rtol=-1.0, #This is supposed to be for block sparse, but we reluctantly accept it here. + return_Rp=false, verbose=false, kwargs..., ) @@ -490,6 +491,11 @@ function qx( if do_rank_reduction #if do_rank_reduction==false then don't change bpivot. bpivot = true end + if !bpivot && return_Rp + @warn "User requested return of Rp matrix with no pivoting." * + " Please eneable QR/LQ with pivoting to return the Rp matrix." + return_Rp = false + end pivot = call_pivot(bpivot, qx) #Convert the bool to whatever type the qx function expects. if bpivot @@ -507,7 +513,12 @@ function qx( # # undo the permutation on R, so the T=Q*R again. 
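     # Xp (returned when return_Rp=true) is the still-pivoted factor, i.e.
     # Xp = X[:, p]; X itself always comes back with the permutation undone so
     # that A ≈ Q * X, which is why the positivity checks above look at Xp.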
diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index 33d62c5683..22e324f695 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -468,6 +468,7 @@ function qx(
   atol=-1.0, #absolute tolerance for rank reduction
   rtol=-1.0, #relative tolerance for rank reduction
   block_rtol=-1.0, #This is supposed to be for block sparse, but we reluctantly accept it here.
+  return_Rp=false,
   verbose=false,
   kwargs...,
 )
@@ -490,6 +491,11 @@ function qx(
   if do_rank_reduction #if do_rank_reduction==false then don't change bpivot.
     bpivot = true
   end
+  if !bpivot && return_Rp
+    @warn "User requested return of Rp matrix with no pivoting." *
+      " Please enable QR/LQ with pivoting to return the Rp matrix."
+    return_Rp = false
+  end
 
   pivot = call_pivot(bpivot, qx) #Convert the bool to whatever type the qx function expects.
   if bpivot
@@ -507,7 +513,12 @@ function qx(
   #
   # undo the permutation on R, so that T=Q*R again.
   #
-  bpivot && (XM = XM[:, invperm(p)])
+  if bpivot
+    if return_Rp
+      XMp = XM # If requested, save the permuted-columns version of X
+    end
+    XM = XM[:, invperm(p)] # un-permute the columns of X
+  end
   #
   # Make the new indices to go onto Q and X
   #
@@ -519,7 +530,13 @@ function qx(
   Xinds = IndsT((q, ind(T, 2)))
   Q = tensor(Dense(vec(QM)), Qinds)
   X = tensor(Dense(vec(XM)), Xinds)
-  return Q, X, p
+  if return_Rp
+    Xp = tensor(Dense(vec(XMp)), Xinds)
+  else
+    Xp = nothing
+  end
+
+  return Q, X, Xp
 end
 
 # Required by svd_recursive
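The atol/rtol rank reduction exercised by the tests below relies on a standard property of column-pivoted QR: the magnitudes of diag(R) are non-increasing, so trailing rows below a tolerance can be trimmed. A rough dense sketch of that rank test (the matrix and tolerance are illustrative; this is not the trim_rows implementation itself):

using LinearAlgebra

A = randn(6, 2) * randn(2, 5)  # a 6x5 matrix of rank 2 by construction
F = qr(A, ColumnNorm())        # pivoting moves the large diagonal entries first
rtol = 1e-14
r = count(>=(rtol * abs(F.R[1, 1])), abs.(diag(F.R)))  # rows an rtol-style cutoff keeps
@assert r == 2                 # the numerical rank shows up on the pivoted diagonal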
diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl
index 1c6083e31c..42f7c00fc6 100644
--- a/NDTensors/test/linearalgebra.jl
+++ b/NDTensors/test/linearalgebra.jl
@@ -2,7 +2,7 @@ using NDTensors
 using LinearAlgebra
 using Test
 
-# Not available on CI machine that test NDTensors.
+# Not available on CI machine that tests NDTensors.
 # using Random
 # Random.seed!(314159)
@@ -32,12 +32,18 @@ end
     positive in [false, true],
     singular in [false, true],
     rank_reveal in [false, true],
-    pivot in [false, true]
+    pivot in [false, true],
+    return_Rp in [false, true]
 
     if qx == ql && (rank_reveal || pivot)
      continue
     end
 
+    # avoid warnings.
+    if !(rank_reveal || pivot) && return_Rp
+      continue
+    end
+
     eps = Base.eps(real(elt)) * 30 #this is set rather tight, so if you increase/change m,n you may have to open up the tolerance on eps.
     atol = rank_reveal ? eps * 1.0 : -1.0
@@ -54,7 +60,9 @@ end
       end
     end
     # you can set verbose=true if you want to get debug output on rank reduction.
-    Q, X, p = qx(A; positive=positive, atol=atol, pivot=pivot, verbose=false) #X is R or L.
+    Q, X, Xp = qx(
+      A; positive=positive, atol=atol, pivot=pivot, return_Rp=return_Rp, verbose=false
+    ) #X is R or L.
     @test A ≈ Q * X atol = eps
     @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
     if dim(Q, 1) == dim(Q, 2)
@@ -67,8 +75,7 @@ end
       @test all(real(diagX) .>= 0.0)
       @test all(imag(diagX) .== 0.0)
     end
-    if positive && !isnothing(p)
-      Xp = X[:, p] #permute columns so diag gets restored to the right place.
+    if positive && !isnothing(Xp)
       nr, nc = size(Xp)
       dr = qx == ql ? Base.max(0, nc - nr) : 0
       diagX = diag(Xp[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over to the right.
@@ -81,7 +88,7 @@ end
      @test dim(X, 1) == 2 #Redundant?
     end
     if (atol >= 0.0 || pivot) && qx == qr
-      @test !isnothing(p)
+      @test !isnothing(Xp) == return_Rp
     end
     #
     # Tall matrix (more rows than cols)
@@ -93,7 +100,9 @@ end
         A[i, :] = A[1, :]
       end
     end
-    Q, X, p = qx(A; positive=positive, atol=atol, pivot=pivot, verbose=false)
+    Q, X, Xp = qx(
+      A; positive=positive, atol=atol, pivot=pivot, return_Rp=return_Rp, verbose=false
+    )
     @test A ≈ Q * X atol = eps
     @test array(Q)' * array(Q) ≈ Diagonal(fill(1.0, dim(Q, 2))) atol = eps
     #@test array(Q) * array(Q)' no such relationship for tall matrices.
@@ -104,8 +113,7 @@ end
      @test all(real(diagX) .>= 0.0)
      @test all(imag(diagX) .== 0.0)
     end
-    if positive && !isnothing(p)
-      Xp = X[:, p] #permute columns so diag gets restored to the right place.
+    if positive && !isnothing(Xp)
      nr, nc = size(Xp)
      dr = qx == ql ? Base.max(0, nc - nr) : 0
      diagX = diag(Xp[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over to the right.
@@ -117,7 +125,7 @@ end
     @test dim(X, 1) == 4 #Redundant?
   end
   if (atol >= 0.0 || pivot) && qx == qr
-    @test !isnothing(p)
+    @test !isnothing(Xp) == return_Rp
   end
 end

diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index 05e82e5add..8b3c75d1f6 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -390,6 +390,8 @@ remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) = (Q * dag(vαl), R * d
 remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, vαr) = (Q, R * dag(vαr))
 remove_trivial_index(Q::ITensor, R::ITensor, vαl, ::Nothing) = (Q * dag(vαl), R)
 remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, ::Nothing) = (Q, R)
+remove_trivial_index(R::ITensor, vαr) = R * dag(vαr)
+remove_trivial_index(R::ITensor, ::Nothing) = R
 
 #
 # Force users to knowingly ask for zero indices using qr(A,()) syntax
 #
@@ -447,7 +449,9 @@ end
 #
 # Generic function implementing both qr and ql decomposition. The X tensor = R or L.
 #
-function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...)
+function qx(
+  qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, return_Rp=false, kwargs...
+)
   # Strip out any extra indices that are not in A.
   # Unit test test/base/test_itensor.jl line 1469 will fail without this.
   Linds = commoninds(A, Linds)
@@ -468,14 +472,8 @@ function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar
   #
   AC = permute(AC, cL, cR; allow_alias=true)
 
-  QXp = qx(tensor(AC); kwargs...) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
-  if length(QXp) == 3
-    QT, XT, perm = QXp
-  else
-    QT, XT = QXp #ITensorGPU does not return a perm yet.
-    perm = nothing
-  end
-
+  QXp = qx(tensor(AC); return_Rp, kwargs...) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
+  QT, XT = QXp[1], QXp[2]
   #
   # Undo the combine operation, to recover all tensor indices.
   #
@@ -489,9 +487,19 @@ function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar
   q = commonind(Q, X)
   Q = settags(Q, tags, q)
   X = settags(X, tags, q)
-  q = settags(q, tags)
-
-  return Q, X, q, perm
+  # repeat all operations on Xp if requested by the user.
+  if return_Rp && length(QXp) == 3 # GPU code does not support new features yet, so we check length as well.
+    Xp = itensor(QXp[3]) * dag(CR)
+    Xp = remove_trivial_index(Xp, vαr)
+    Xp = settags(Xp, tags, q)
+  else
+    Xp = nothing
+  end
+
+  q = settags(q, tags) #fix tags of q last.
+
+  return Q, X, q, Xp
 end
 
 #
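For orientation, a sketch of the resulting ITensor-level call pattern that the updated tests below rely on. The four-value return and the pivot/return_Rp keywords exist only in this patch series, and the indices here are illustrative:

using ITensors

i = Index(5, "i")
j = Index(4, "j")
A = randomITensor(i, j)
Q, R, q, Rp = qr(A, (i,); pivot=true, return_Rp=true)  # q tags the new "Link,qr" index
@assert A ≈ Q * R            # R has un-permuted columns, so A still factorizes
@assert dims(Rp) == dims(R)  # Rp carries the same indices, columns in pivoted order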
diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl
index 7df99c184a..189265981b 100644
--- a/test/base/test_decomp.jl
+++ b/test/base/test_decomp.jl
@@ -409,7 +409,7 @@ end
     @test isnothing(p)
   end
 
-  @testset "Dense rank revealing QR/LQ decomp interface options" for qx in [qr, lq]
+  @testset "Dense rank revealing QR/LQ decomp interface options" begin
     l = Index(5, "l")
     s = Index(2, "s")
     r = Index(5, "r")
@@ -418,36 +418,77 @@ end
     rinds = noncommoninds(A, qrinds)
     A, expected_rank = rank_fix(A, qrinds) #make all columns linearly dependent on column 1, so rank==1.
 
-    Q, R = qx(A, l, s) # no pivoting
-    Q, R, iq = qx(A, qrinds) # no pivoting
+    Q, R = qr(A, l, s) # no pivoting
+    Q, R, iq = qr(A, qrinds) # no pivoting
     @test dim(iq) == dim(qrinds)
-    Q, R, iq, p = qx(A, qrinds) # no pivoting
-    @test isnothing(p)
+    Q, R, iq, Rp = qr(A, qrinds) # no pivoting
+    @test isnothing(Rp)
     @test dim(iq) == dim(qrinds)
     # Q, R, iq, p = qx(A,qrinds; pivot=Val(false)) not supported
-    Q, R, iq, p = qx(A, qrinds; pivot=false) # no pivoting
-    @test isnothing(p)
+    Q, R, iq, Rp = qr(A, qrinds; pivot=false) # no pivoting
+    @test isnothing(Rp)
+    @test dim(iq) == dim(qrinds)
+
+    Q, R, iq, Rp = qr(A, qrinds; pivot=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally
+    @test isnothing(Rp)
+    @test dim(iq) == dim(qrinds)
+
+    Q, R, iq, Rp = qr(A, qrinds; pivot=true, return_Rp=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
+    @test dim(iq) == dim(qrinds)
+
+    Q, R, iq, Rp = qr(A, qrinds; atol=1e-14, return_Rp=true) # absolute tolerance for rank reduction
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
+    @test dim(iq) == expected_rank
+
+    Q, R, iq, Rp = qr(A, qrinds; rtol=1e-15, return_Rp=true) # relative tolerance for rank reduction
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
+    @test dim(iq) == expected_rank
+
+    Q, R, iq, Rp = qr(A, qrinds; block_rtol=1e-15, return_Rp=true) # relative tolerance for rank reduction
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
+    @test dim(iq) == expected_rank
+
+    # LQ versions
+
+    L, Q = lq(A, l, s) # no pivoting
+    L, Q, iq = lq(A, qrinds) # no pivoting
+    @test dim(iq) == dim(qrinds)
+    L, Q, iq, Lp = lq(A, qrinds) # no pivoting
+    @test isnothing(Lp)
+    @test dim(iq) == dim(qrinds)
+
+    L, Q, iq, Lp = lq(A, qrinds; pivot=false) # no pivoting
+    @test isnothing(Lp)
     @test dim(iq) == dim(qrinds)
 
-    Q, R, iq, p = qx(A, qrinds; pivot=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    L, Q, iq, Lp = lq(A, qrinds; pivot=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally
+    @test isnothing(Lp)
     @test dim(iq) == dim(qrinds)
 
-    Q, R, iq, p = qx(A, qrinds; atol=1e-14) # absolute tolerance for rank reduction
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    L, Q, iq, Lp = lq(A, qrinds; pivot=true, return_Rp=true) # pivoting but no rank reduction, sets `pivot=ColumnNorm()` internally
+    @test !isnothing(Lp)
+    @test dims(Lp) == dims(L)
+    @test dim(iq) == dim(qrinds)
+
+    L, Q, iq, Lp = lq(A, qrinds; atol=1e-14, return_Rp=true) # absolute tolerance for rank reduction
+    @test !isnothing(Lp)
+    @test dims(Lp) == dims(L)
     @test dim(iq) == expected_rank
 
-    Q, R, iq, p = qx(A, qrinds; rtol=1e-15) # relative tolerance for rank reduction
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    L, Q, iq, Lp = lq(A, qrinds; rtol=1e-15, return_Rp=true) # relative tolerance for rank reduction
+    @test !isnothing(Lp)
+    @test dims(Lp) == dims(L)
    @test dim(iq) == expected_rank
 
-    Q, R, iq, p = qx(A, qrinds; block_rtol=1e-15) # relative tolerance for rank reduction
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    L, Q, iq, Lp = lq(A, qrinds; block_rtol=1e-15, return_Rp=true) # relative tolerance for rank reduction
+    @test !isnothing(Lp)
+    @test dims(Lp) == dims(L)
     @test dim(iq) == expected_rank
   end
 
@@ -461,36 +502,40 @@ end
     rinds = noncommoninds(A, qrinds)
     A, expected_rank = rank_fix(A, qrinds) #make all columns linearly dependent on column 1, so rank==1.
-    Q, R, iq, p = qr(A, qrinds; pivot=NoPivot()) # no pivoting
-    @test isnothing(p)
+    Q, R, iq, Rp = qr(A, qrinds; pivot=NoPivot()) # no pivoting
+    @test isnothing(Rp)
+    @test dim(iq) == dim(qrinds)
+
+    L, Q, iq, Lp = lq(A, qrinds; pivot=NoPivot()) # no pivoting
+    @test isnothing(Lp)
     @test dim(iq) == dim(qrinds)
 
-    L, Q, iq, p = lq(A, qrinds; pivot=NoPivot()) # no pivoting
-    @test isnothing(p)
+    Q, R, iq, Rp = qr(A, qrinds; pivot=ColumnNorm()) # column pivoting, no rank reduction
+    @test isnothing(Rp)
     @test dim(iq) == dim(qrinds)
 
-    Q, R, iq, p = qr(A, qrinds; pivot=ColumnNorm()) # column pivoting, no rank reduction
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    Q, R, iq, Rp = qr(A, qrinds; pivot=ColumnNorm(), return_Rp=true) # column pivoting, no rank reduction
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
     @test dim(iq) == dim(qrinds)
 
-    L, Q, iq, p = lq(A, qrinds; pivot=RowNorm()) # row pivoting, no rank reduction
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    L, Q, iq, Lp = lq(A, qrinds; pivot=RowNorm(), return_Rp=true) # row pivoting, no rank reduction
+    @test !isnothing(Lp)
+    @test dims(Lp) == dims(L)
     @test dim(iq) == dim(qrinds)
 
     @test_logs (
       :warn, "Please use ColumnNorm() instead of RowNorm() for pivoted qr decomposition."
-    ) Q, R, iq, p = qr(A, qrinds; pivot=RowNorm()) # column pivoting, no rank reduction
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    ) Q, R, iq, Rp = qr(A, qrinds; pivot=RowNorm(), return_Rp=true) # column pivoting, no rank reduction
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
     @test dim(iq) == dim(qrinds)
 
     @test_logs (
       :warn, "Please use RowNorm() instead of ColumnNorm() for pivoted lq decomposition."
-    ) L, Q, iq, p = lq(A, qrinds; pivot=ColumnNorm()) # row pivoting, no rank reduction
-    @test !isnothing(p)
-    @test length(p) == dim(rinds)
+    ) L, Q, iq, Lp = lq(A, qrinds; pivot=ColumnNorm(), return_Rp=true) # row pivoting, no rank reduction
+    @test !isnothing(Lp)
+    @test dims(Lp) == dims(L)
     @test dim(iq) == dim(qrinds)
   end
 end
 
@@ -506,19 +551,23 @@ end
     rinds = noncommoninds(A, qrinds)
     A, expected_rank = rank_fix(A, qrinds) #make all columns linearly dependent on column 1, so rank==1.
 
-    Q, R, iq, p = qr(A, qrinds; atol=1e-14) # absolute tolerance for rank reduction
-    @test !isnothing(p)
-    @test length(p) > 0
+    Q, R, iq, Rp = qr(A, qrinds; atol=1e-14) # absolute tolerance for rank reduction
+    @test isnothing(Rp)
+    @test dim(iq) == expected_rank
+
+    Q, R, iq, Rp = qr(A, qrinds; atol=1e-14, return_Rp=true) # absolute tolerance for rank reduction
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
     @test dim(iq) == expected_rank
 
-    Q, R, iq, p = qr(A, qrinds; block_rtol=1e-15) # relative tolerance for rank reduction
-    @test !isnothing(p)
-    @test length(p) > 0
+    Q, R, iq, Rp = qr(A, qrinds; block_rtol=1e-15, return_Rp=true) # relative tolerance for rank reduction
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
     @test dim(iq) == expected_rank
 
-    Q, R, iq, p = qr(A, qrinds; block_rtol=1e-15, rtol=1000.0) # rtol ignored.
-    @test !isnothing(p)
-    @test length(p) > 0
+    Q, R, iq, Rp = qr(A, qrinds; block_rtol=1e-15, rtol=1000.0, return_Rp=true) # rtol ignored.
+    @test !isnothing(Rp)
+    @test dims(Rp) == dims(R)
     @test dim(iq) == expected_rank
   end
 
@@ -532,17 +581,17 @@ end
     Ainds = inds(A)
     A, expected_rank = rank_fix(A, Ainds[1:ninds]) #make all columns linearly dependent on column 1, so rank==1.
-    Q, R, q, p = qr(A, Ainds[1:ninds]; atol=1e-12)
+    Q, R, q, Rp = qr(A, Ainds[1:ninds]; atol=1e-12)
     @test dim(q) == expected_rank #check that we found rank==1
     @test A ≈ Q * R atol = 1e-13
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    @test !isnothing(p)
+    @test isnothing(Rp)
 
-    L, Q, q, p = lq(A, Ainds[1:ninds]; atol=1e-12)
+    L, Q, q, Rp = lq(A, Ainds[1:ninds]; atol=1e-12)
     @test dim(q) == expected_rank #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
     @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    @test !isnothing(p)
+    @test isnothing(Rp)
   end
 
   @testset "Rank revealing QR/LQ decomp on MPO block-sparse $elt tensor" for ninds in
     Ainds = inds(A)
     A, expected_rank = rank_fix(A, Ainds[1:ninds]) #make all columns linearly dependent on column 1, so rank==1.
 
-    Q, R, q, p = qr(A, Ainds[1:ninds]; block_rtol=1e-12)
+    Q, R, q, Rp = qr(A, Ainds[1:ninds]; block_rtol=1e-12)
     @test dim(q) == expected_rank #check that we found the correct rank
     @test A ≈ Q * R atol = 1e-13
     @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-    @test !isnothing(p)
+    @test isnothing(Rp)
 
-    L, Q, q, p = lq(A, Ainds[1:ninds]; block_rtol=1e-12)
+    L, Q, q, Rp = lq(A, Ainds[1:ninds]; block_rtol=1e-12)
     @test dim(q) == expected_rank #check that we found rank==1
     @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
     @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-    @test !isnothing(p)
+    @test isnothing(Rp)
   end
 
   @testset "factorize with QR" begin