Skip to content

Commit

Permalink
Reduce memory usage in CG (JuliaLinearAlgebra#130)
Browse files in the repository at this point in the history
* Reduce memory usage in CG

* Bring back Krylov subspace again

* Fix deprecation warning
  • Loading branch information
haampie committed Jun 28, 2017
1 parent ac7bbee commit 002fa75
Show file tree
Hide file tree
Showing 3 changed files with 110 additions and 49 deletions.
44 changes: 44 additions & 0 deletions benchmark/benchmark-linear-systems.jl
@@ -0,0 +1,44 @@
module LinearSystemsBench

import Base.A_ldiv_B!, Base.\

using BenchmarkTools
using IterativeSolvers

# A DiagonalMatrix that doesn't check whether it is singular in the \ op.
immutable DiagonalPreconditioner{T}
    diag::Vector{T}
end

# In-place preconditioner application: y[i] = diag[i] \ b[i], element by element.
# Returns y so the call composes like the Base method it shadows.
function A_ldiv_B!{T}(y::AbstractVector{T}, A::DiagonalPreconditioner{T}, b::AbstractVector{T})
    d = A.diag
    @inbounds for idx in eachindex(b)
        y[idx] = d[idx] \ b[idx]
    end
    return y
end

# Out-of-place solve; allocates a fresh result vector.
(\)(D::DiagonalPreconditioner, b::AbstractVector) = D.diag .\ b

# Build a symmetric positive definite tridiagonal test system A*x = b whose
# exact solution is the all-ones vector (b is constructed as A * ones(n)).
function posdef(n)
    maindiag = fill(2.01, n)
    offdiag = fill(-1.0, n)
    A = SymTridiagonal(maindiag, offdiag)
    return A, A * ones(n)
end

# Benchmark preconditioned CG on the SPD test problem above.
# Keywords: problem size `n`, relative tolerance `tol`, iteration cap `maxiter`.
function cg(; n = 1_000_000, tol = 1e-6, maxiter::Int = 200)
    A, b = posdef(n)
    P = DiagonalPreconditioner(collect(linspace(1.0, 2.0, n)))

    println("Symmetric positive definite matrix of size ", n)
    println("Eigenvalues in interval [0.01, 4.01]")
    println("Tolerance = ", tol, "; max #iterations = ", maxiter)

    # Dry run: forces compilation so the benchmark below measures steady state.
    initial = rand(n)
    IterativeSolvers.cg!(copy(initial), A, b, Pl = P, maxiter = maxiter, tol = tol, log = false)

    # Actual benchmark; setup gives each sample a fresh copy of the start vector.
    @benchmark IterativeSolvers.cg!(x0, $A, $b, Pl = $P, maxiter = $maxiter, tol = $tol, log = false) setup=(x0 = copy($initial))
end

end
113 changes: 65 additions & 48 deletions src/cg.jl
@@ -1,66 +1,83 @@
export cg, cg!

####################
# API method calls #
####################

cg(A, b; kwargs...) = cg!(zerox(A,b), A, b; kwargs...)
cg(A, b; kwargs...) = cg!(zerox(A, b), A, b; kwargs...)

function cg!(x, A, b;
tol::Real=size(A,2)*eps(), maxiter::Integer=size(A,2),
plot=false, log::Bool=false, kwargs...
)
tol = sqrt(eps(real(eltype(b)))),
maxiter::Integer = min(20, size(A, 1)),
plot = false,
log::Bool = false,
kwargs...
)
(plot & !log) && error("Can't plot when log keyword is false")
K = KrylovSubspace(A, length(b), 1, Vector{Adivtype(A,b)}[])
init!(K, x)
history = ConvergenceHistory(partial=!log)
history = ConvergenceHistory(partial = !log)
history[:tol] = tol
if all(el->el==0, b)
fill!(x, zero(eltype(x)))
return log ? (x, history) : x
end
reserve!(history,:resnorm, maxiter)
cg_method!(history, x, K, b; tol=tol, maxiter=maxiter, kwargs...)
reserve!(history, :resnorm, maxiter)
cg_method!(history, x, K, b; tol = tol, maxiter = maxiter, kwargs...)
(plot || log) && shrink!(history)
plot && showplot(history)
log ? (x, history) : x
end

#########################
# Method Implementation #
#########################

function cg_method!(log::ConvergenceHistory, x, K, b;
Pl=1,tol::Real=size(K.A,2)*eps(),maxiter::Integer=size(K.A,2), verbose::Bool=false
)
verbose && @printf("=== cg ===\n%4s\t%7s\n","iter","resnorm")
tol = tol * norm(b)
r = b - nextvec(K)
q = zeros(r)
z = solve(Pl,r)
p = copy(z)
γ = dot(r, z)
for iter=1:maxiter
nextiter!(log, mvps=1)
append!(K, p)
nextvec!(q, K)
α = γ/dot(p, q)
# α>=0 || throw(PosSemidefException("α=$α"))
@blas! x += α*p
@blas! r += -α*q
resnorm = norm(r)
push!(log,:resnorm,resnorm)
verbose && @printf("%3d\t%1.2e\n",iter,resnorm)
resnorm < tol && break
solve!(z,Pl,r)
oldγ = γ
γ = dot(r, z)
β = γ/oldγ
@blas! p *= β
@blas! p += z
Pl = 1,
tol = sqrt(eps(real(eltype(b)))),
maxiter::Integer = min(20, size(K.A, 1)),
verbose::Bool = false
)
T = eltype(b)
n = size(K.A, 1)

# Initial residual vector
r = copy(b)
@blas! r -= one(T) * nextvec(K)
c = zeros(T, n)
u = zeros(T, n)
ρ = one(T)

iter = 0

last_residual = norm(r)

# Here you could save one inner product if norm(r) is used rather than norm(b)
reltol = norm(b) * tol

while last_residual > reltol && iter < maxiter
nextiter!(log, mvps = 1)

# Preconditioner: c = Pl \ r
solve!(c, Pl, r)

ρ_prev = ρ
ρ = dot(c, r)
β = -ρ / ρ_prev

# u := r - βu (almost an axpy)
@blas! u *= -β
@blas! u += one(T) * c

# c = A * u
append!(K, u)
nextvec!(c, K)
α = ρ / dot(u, c)

# Improve solution and residual
@blas! x += α * u
@blas! r -= α * c

iter += 1
last_residual = norm(r)

# Log progress
push!(log, :resnorm, last_residual)
verbose && @printf("%3d\t%1.2e\n", iter, last_residual)
end

verbose && @printf("\n")
setconv(log, 0<=norm(r)<tol)
setconv(log, last_residual < reltol)

x
end

Expand Down Expand Up @@ -145,4 +162,4 @@ end

@doc docstring[1] -> cg
@doc docstring[2] -> cg!
end
end
2 changes: 1 addition & 1 deletion test/getDivGrad.jl
Expand Up @@ -33,5 +33,5 @@ function spdiags(B,d,m,n)
a[(round(Int, len[k])+1):round(Int, len[k+1]),:] = [i i+d[k] B[i+(m >= n)*d[k], k]]
end

sparse(round(Int, a[:,1]), round(Int, a[:,2]), a[:,3], m, n)
sparse(round.(Int, a[:,1]), round.(Int, a[:,2]), a[:,3], m, n)
end

0 comments on commit 002fa75

Please sign in to comment.