Skip to content

Commit ac7bbee

Browse files
shivin9 authored and lopezm94 committed
cg_mem_fix (missed this commit before) (#128)
* Use nextvec! to further decrease memory consumption
* Modularize tests (so unrelated tests break less frequently)
1 parent 20ce135 commit ac7bbee

19 files changed

+362
-273
lines changed

src/cg.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,13 +36,14 @@ function cg_method!(log::ConvergenceHistory, x, K, b;
3636
verbose && @printf("=== cg ===\n%4s\t%7s\n","iter","resnorm")
3737
tol = tol * norm(b)
3838
r = b - nextvec(K)
39+
q = zeros(r)
3940
z = solve(Pl,r)
4041
p = copy(z)
4142
γ = dot(r, z)
4243
for iter=1:maxiter
4344
nextiter!(log, mvps=1)
4445
append!(K, p)
45-
q = nextvec(K)
46+
nextvec!(q, K)
4647
α = γ/dot(p, q)
4748
# α>=0 || throw(PosSemidefException("α=$α"))
4849
@blas! x += α*p

src/gmres.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,8 @@ end
2727
#One Arnoldi iteration
2828
#Optionally takes a truncation parameter l
2929
function arnoldi(K::KrylovSubspace, w; l=K.order)
30-
v = nextvec(K)
30+
v = similar(w)
31+
nextvec!(v,K)
3132
w = copy(v)
3233
n = min(length(K.v), l)
3334
h = zeros(eltype(v), n+1)

src/krylov.jl

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,15 @@ function nextvec{T,OpT<:Function}(K::KrylovSubspace{T,OpT})
107107
K.A(lastvec(K))
108108
end
109109

110+
function nextvec!{T}(out::AbstractArray{T}, K::KrylovSubspace{T})
111+
K.mvps += 1
112+
Base.A_mul_B!(out, K.A, lastvec(K))
113+
end
114+
function nextvec!{T,OpT<:Function}(out::AbstractArray{T}, K::KrylovSubspace{T,OpT})
115+
K.mvps += 1
116+
copy!(out, K.A(lastvec(K)))
117+
end
118+
110119
size(K::KrylovSubspace) = length(K.v)
111120
function size(K::KrylovSubspace, n::Int)
112121
if isa(K.A, AbstractMatrix)

test/cg.jl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@ using IterativeSolvers
22
using FactCheck
33
using LinearMaps
44

5+
srand(1234321)
6+
57
include("getDivGrad.jl")
68

79
facts("cg") do

test/chebyshev.jl

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
using IterativeSolvers
2+
using FactCheck
3+
using Base.Test
4+
using LinearMaps
5+
6+
srand(1234321)
7+
8+
#Chebyshev
9+
facts("Chebyshev") do
10+
for T in (Float32, Float64, Complex64, Complex128)
11+
context("Matrix{$T}") do
12+
A=convert(Matrix{T}, randn(n,n))
13+
T<:Complex && (A+=convert(Matrix{T}, im*randn(n,n)))
14+
A=A+A'
15+
A=A'*A #Construct SPD matrix
16+
b=convert(Vector{T}, randn(n))
17+
T<:Complex && (b+=convert(Vector{T}, im*randn(n)))
18+
b=b/norm(b)
19+
tol = 0.1 #For some reason Chebyshev is very slow
20+
v = eigvals(A)
21+
mxv = maximum(v)
22+
mnv = minimum(v)
23+
x_cheby, c_cheby= chebyshev(A, b, mxv+(mxv-mnv)/100, mnv-(mxv-mnv)/100, tol=tol, maxiter=10^5, log=true)
24+
@fact c_cheby.isconverged --> true
25+
@fact norm(A*x_cheby-b) --> less_than(tol)
26+
end
27+
end
28+
end

test/common.jl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@ using IterativeSolvers
22
using FactCheck
33
using LinearMaps
44

5+
srand(1234321)
6+
57
facts("Basic operations") do
68

79
context("Adivtype") do

test/gmres.jl

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
using IterativeSolvers
2+
using FactCheck
3+
using Base.Test
4+
using LinearMaps
5+
6+
srand(1234321)
7+
8+
#GMRES
9+
facts("gmres") do
10+
11+
for T in (Float32, Float64, Complex64, Complex128)
12+
context("Matrix{$T}") do
13+
14+
A = convert(Matrix{T}, randn(n,n))
15+
L = convert(Matrix{T}, randn(n,n))
16+
R = convert(Matrix{T}, randn(n,n))
17+
b = convert(Vector{T}, randn(n))
18+
if T <: Complex
19+
A += im*convert(Matrix{T}, randn(n,n))
20+
L += im*convert(Matrix{T}, randn(n,n))
21+
R += im*convert(Matrix{T}, randn(n,n))
22+
b += im*convert(Vector{T}, randn(n))
23+
end
24+
F = lufact(A)
25+
b = b/norm(b)
26+
27+
# Test optimality condition: residual should be non-increasing
28+
x_gmres, c_gmres = gmres(A, b, log = true, restart = 3, maxiter = 10);
29+
@fact all(diff(c_gmres[:resnorm]) .<= 0.0) --> true
30+
31+
x_gmres, c_gmres = gmres(A, b, Pl=L, Pr=R, log=true)
32+
@fact c_gmres.isconverged --> true
33+
@fact norm(A*x_gmres - b) --> less_than(eps(real(one(T))))
34+
35+
x_gmres, c_gmres = gmres(A, b, Pl=F, maxiter=1, restart=1, log=true)
36+
@fact c_gmres.isconverged --> true
37+
@fact norm(A*x_gmres - b) --> less_than(eps(real(one(T))))
38+
39+
x_gmres, c_gmres = gmres(A, b, Pl=1, Pr=F, maxiter=1, restart=1, log=true)
40+
@fact c_gmres.isconverged --> true
41+
@fact norm(A*x_gmres - b) --> less_than(eps(real(one(T))))
42+
end
43+
end
44+
45+
for T in (Float64, Complex128)
46+
context("SparseMatrixCSC{$T}") do
47+
A = sprandn(n,n,0.5)+0.001*eye(T,n,n)
48+
L = sprandn(n,n,0.5)
49+
R = sprandn(n,n,0.5)
50+
b = convert(Vector{T}, randn(n))
51+
if T <: Complex
52+
A += im*sprandn(n,n,0.5)
53+
L += im*sprandn(n,n,0.5)
54+
R += im*sprandn(n,n,0.5)
55+
b += im*randn(n)
56+
end
57+
F = lufact(A)
58+
b = b / norm(b)
59+
60+
# Test optimality condition: residual should be non-increasing
61+
x_gmres, c_gmres = gmres(A, b, log = true, restart = 3, maxiter = 10);
62+
@fact all(diff(c_gmres[:resnorm]) .<= 0.0) --> true
63+
64+
x_gmres, c_gmres= gmres(A, b, Pl=L, Pr=R, log=true)
65+
@fact c_gmres.isconverged --> true
66+
@fact norm(A*x_gmres - b) --> less_than(eps(real(one(T))))
67+
68+
x_gmres, c_gmres = gmres(A, b, Pl=F, maxiter=1, restart=1, log=true)
69+
@fact c_gmres.isconverged --> true
70+
@fact norm(A*x_gmres - b) --> less_than(eps(real(one(T))))
71+
72+
x_gmres, c_gmres = gmres(A, b, Pl=1, Pr=F, maxiter=1, restart=1, log=true)
73+
@fact c_gmres.isconverged --> true
74+
@fact norm(A*x_gmres - b) --> less_than(eps(real(one(T))))
75+
end
76+
77+
context("Linear operator defined as a function") do
78+
A = LinearMap(cumsum!, 100, 100, Float64; ismutating=true)
79+
rhs = randn(size(A,2))
80+
rhs/= norm(rhs)
81+
tol = 1e-5
82+
83+
x = gmres(A,rhs;tol=tol,maxiter=2000)
84+
@fact norm(A*x - rhs) --> less_than_or_equal(tol)
85+
end
86+
end
87+
end

test/history.jl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@ using IterativeSolvers
22
using FactCheck
33
using Plots
44

5+
srand(1234321)
6+
57
unicodeplots() #Use UnicodePlots backend because it is the lightest
68

79
facts("ConvergenceHistory") do

test/idrs.jl

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
using IterativeSolvers
2+
using FactCheck
3+
using Base.Test
4+
using LinearMaps
5+
6+
n = 10
7+
m = 6
8+
srand(1234567)
9+
10+
#IDRs
11+
facts("idrs") do
12+
13+
for T in (Float32, Float64, Complex64, Complex128)
14+
# without smoothing
15+
context("Matrix{$T}, without residual smoothing") do
16+
17+
A = convert(Matrix{T}, randn(n,n))
18+
b = convert(Vector{T}, randn(n))
19+
if T <: Complex
20+
A += im*convert(Matrix{T}, randn(n,n))
21+
b += im*convert(Vector{T}, randn(n))
22+
end
23+
b = b/norm(b)
24+
25+
x_idrs, c_idrs = idrs(A, b, log=true)
26+
@fact c_idrs.isconverged --> true
27+
@fact norm(A*x_idrs - b) --> less_than(eps(real(one(T))))
28+
end
29+
30+
# with smoothing
31+
context("Matrix{$T}, with residual smoothing") do
32+
33+
A = convert(Matrix{T}, randn(n,n))
34+
b = convert(Vector{T}, randn(n))
35+
if T <: Complex
36+
A += im*convert(Matrix{T}, randn(n,n))
37+
b += im*convert(Vector{T}, randn(n))
38+
end
39+
b = b/norm(b)
40+
41+
x_idrs, c_idrs = idrs(A, b; smoothing=true, log=true)
42+
@fact c_idrs.isconverged --> true
43+
@fact norm(A*x_idrs - b) --> less_than(eps(real(one(T))))
44+
end
45+
end
46+
47+
for T in (Float64, Complex128)
48+
context("SparseMatrixCSC{$T}") do
49+
A = sprandn(n,n,0.5)+0.001*eye(T,n,n)
50+
b = convert(Vector{T}, randn(n))
51+
if T <: Complex
52+
A += im*sprandn(n,n,0.5)
53+
b += im*randn(n)
54+
end
55+
b = b / norm(b)
56+
57+
x_idrs, c_idrs= idrs(A, b, log=true)
58+
@fact c_idrs.isconverged --> true
59+
@fact norm(A*x_idrs - b) --> less_than(eps(real(one(T))))
60+
end
61+
end
62+
end

test/lanczos.jl

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
using IterativeSolvers
2+
using FactCheck
3+
using Base.Test
4+
using LinearMaps
5+
6+
srand(1234321)
7+
8+
#Lanczos methods
9+
10+
type MyOp{T}
11+
buf::Matrix{T}
12+
end
13+
14+
import Base: *, size, eltype
15+
16+
*(A::MyOp, x::AbstractVector) = A.buf*x
17+
size(A::MyOp, i) = size(A.buf, i)
18+
eltype(A::MyOp) = eltype(A.buf)
19+
20+
facts("eiglancz") do
21+
for T in (Float32, Float64)
22+
context("Matrix{$T}") do
23+
A = convert(Matrix{T}, randn(n,n))
24+
A = A + A' #Symmetric
25+
v = eigvals(A)
26+
27+
eval_lanczos, c_lanczos = eiglancz(A, log=true)
28+
@fact c_lanczos.isconverged --> true
29+
@fact norm(v - eval_lanczos) --> less_than(eps(T))
30+
end
31+
32+
context("Op{$T}") do
33+
A = MyOp(convert(Matrix{T}, randn(5,5)) |> t -> t + t')
34+
v = eigvals(Symmetric(A.buf))
35+
eval_lanczos, c_lanczos = eiglancz(A, log=true)
36+
@fact c_lanczos.isconverged --> true
37+
@fact norm(v - eval_lanczos) --> less_than(eps(T))
38+
end
39+
end
40+
end

0 commit comments

Comments (0)