import Base: iterate

export qmr, qmr!
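
# This file is written to be `include`d from the enclosing IterativeSolvers
# module, which is assumed to provide the names used below: `norm`, `dot`,
# `mul!`, `axpy!`, `rmul!` and `givensAlgorithm` (LinearAlgebra), `@printf`
# (Printf), and the package-internal helpers `ConvergenceHistory`, `zerox`,
# `nextiter!`, `reserve!`, `setconv` and `shrink!`.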

mutable struct LanczosDecomp{T, rT, matT, mat_tT, vecT}
    A::matT
    At::mat_tT

    v_prev::vecT # Lanczos basis vectors for A (biorthogonal to the w's, not orthonormal)
    v_curr::vecT # Lanczos basis vectors for A (biorthogonal to the w's, not orthonormal)
    v_next::vecT # Lanczos basis vectors for A (biorthogonal to the w's, not orthonormal)

    w_prev::vecT # Lanczos basis vectors for A' (biorthogonal to the v's)
    w_curr::vecT # Lanczos basis vectors for A' (biorthogonal to the v's)
    w_next::vecT # Lanczos basis vectors for A' (biorthogonal to the v's)

    α::T
    β_prev::T
    β_curr::T
    δ::T
    resnorm::rT
end
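
# `LanczosDecomp` carries the rolling state of the two-sided (biorthogonal)
# Lanczos process: three consecutive basis vectors for A and for A', the
# current recurrence coefficients α, β and δ (the entries of the tridiagonal
# Lanczos matrix), and the norm of the initial residual b - A*x0 (`resnorm`),
# which the QMR driver uses to scale its stopping tolerance.
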
function LanczosDecomp(x, A::matT, b; initially_zero = false) where {matT}
    T = eltype(A)

    # we choose:
    # v_0 = 0, v_1 = (b - Ax) / ‖b - Ax‖
    # w_0 = 0, w_1 = v_1
    # v_2 and w_2 are uninitialized and used as workspace

    v_prev = zero(x)
    v_curr = copy(b)
    v_next = similar(x)

    if !initially_zero
        # r0 = b - A*x
        mul!(v_next, A, x)
        axpy!(-one(T), v_next, v_curr)
    end
    resnorm = norm(v_curr)
    rmul!(v_curr, inv(resnorm))

    w_prev = zero(x)
    w_curr = copy(v_curr)
    w_next = similar(x)

    α = zero(T)
    β_prev = zero(T)
    β_curr = zero(T)
    δ = zero(T)

    LanczosDecomp(
        A, adjoint(A),
        v_prev, v_curr, v_next,
        w_prev, w_curr, w_next,
        α, β_prev, β_curr, δ,
        resnorm)
end

start(::LanczosDecomp) = 1
done(l::LanczosDecomp, iteration::Int) = false
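
# One step of the recurrence implemented by `iterate` below (Saad, Algorithm
# 7.1, with conjugation to handle the complex case). Writing v = v_curr and
# w = w_curr, and using v_next/w_next as workspace:
#
#   α       = dot(A*v, w)
#   v_next  = A*v  - conj(α)*v - conj(β_curr)*v_prev
#   w_next  = A'*w - α*w       - δ*w_prev
#   δ_new   = sqrt(|dot(v_next, w_next)|)   # breakdown: stop if zero
#   β_new   = dot(v_next, w_next) / δ_new
#   v_next /= δ_new,  w_next /= β_new
#
# The buffers are then cycled so that (v_curr, w_curr) hold the newest pair.
# `done` above always returns `false`: the recurrence only ends on breakdown,
# and the QMR driver further below decides when to stop iterating.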
function iterate(l::LanczosDecomp, iteration::Int=start(l))
    # Following Algorithm 7.1 of Saad
    if done(l, iteration) return nothing end

    mul!(l.v_next, l.A, l.v_curr)

    l.α = dot(l.v_next, l.w_curr) # v_next currently contains A*v_curr
    axpy!(-conj(l.α), l.v_curr, l.v_next)
    if iteration > 1 # β_1 = 0
        axpy!(-conj(l.β_curr), l.v_prev, l.v_next)
    end

    mul!(l.w_next, l.At, l.w_curr)
    axpy!(-l.α, l.w_curr, l.w_next)
    if iteration > 1 # δ_1 = 0
        axpy!(-l.δ, l.w_prev, l.w_next)
    end

    vw = dot(l.v_next, l.w_next)
    l.δ = sqrt(abs(vw))
    if iszero(l.δ) return nothing end

    l.β_prev = l.β_curr
    l.β_curr = vw / l.δ

    rmul!(l.v_next, inv(l.δ))
    rmul!(l.w_next, inv(l.β_curr))

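    # Cycle the buffers: after this swap v_curr/w_curr hold the newly computed
    # vectors, v_prev/w_prev hold the previous ones, and v_next/w_next become
    # free workspace for the next step.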
    l.w_next, l.w_curr, l.w_prev = l.w_prev, l.w_next, l.w_curr
    l.v_next, l.v_curr, l.v_prev = l.v_prev, l.v_next, l.v_curr

    return nothing, iteration + 1
end

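# After m Lanczos steps A*V_m = V_{m+1}*T_m, with T_m an (m+1)-by-m
# tridiagonal matrix. QMR writes x_m = x_0 + V_m*y_m and chooses y_m to
# minimize the quasi-residual norm ‖β*e_1 - T_m*y‖ with β = ‖r_0‖, which is
# solved incrementally with Givens rotations. Only the last column of the
# rotated T_m is needed (the four entries of `H`), together with two rolling
# search directions `p_prev`, `p_curr` and the rotated right-hand side `g`.
# See Saad, Section 7.3.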
mutable struct QMRIterable{T, xT, rT, lanczosT}
    x::xT

    lanczos::lanczosT
    resnorm::rT
    reltol::rT
    maxiter::Int

    g::Vector{T} # right-hand side of the small least-squares problem, updated by the Givens rotations
    H::Vector{T} # last column of the Hessenberg (tridiagonal) matrix; 4 entries, kept on the CPU

    c_prev::T
    c_curr::T
    s_prev::T
    s_curr::T

    p_prev::xT
    p_curr::xT
end

function qmr_iterable!(x, A, b;
    tol = sqrt(eps(real(eltype(b)))),
    maxiter::Int = size(A, 2),
    initially_zero::Bool = false,
    lookahead::Bool = false
    )
    T = eltype(x)

    lanczos = LanczosDecomp(x, A, b, initially_zero = initially_zero)

    resnorm = lanczos.resnorm
    g = [resnorm; zero(T)]
    H = zeros(T, 4)

    # Givens rotations
    c_prev, s_prev = one(T), zero(T)
    c_curr, s_curr = one(T), zero(T)

    # Search directions (the p vectors of the QMR update)
    p_prev = zero(x)
    p_curr = zero(x)

    reltol = tol * lanczos.resnorm

    QMRIterable(x,
        lanczos, resnorm, reltol,
        maxiter,
        g, H,
        c_prev, c_curr, s_prev, s_curr,
        p_prev, p_curr
    )
end
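
# The iterable interface can also be driven directly; each iteration yields
# the current residual norm estimate. An illustrative sketch (not part of the
# exported API):
#
#     it = qmr_iterable!(zeros(size(A, 2)), A, b, tol = 1e-8)
#     for resnorm in it
#         println(resnorm)
#     end
#     it.x  # approximate solution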

converged(q::QMRIterable) = q.resnorm ≤ q.reltol
start(::QMRIterable) = 1
done(q::QMRIterable, iteration::Int) = iteration > q.maxiter || converged(q)

function iterate(q::QMRIterable, iteration::Int=start(q))
    if done(q, iteration) return nothing end

    iterate(q.lanczos, iteration)

    # for the classical Lanczos algorithm, only 4 elements of the last column
    # of the Hessenberg matrix are needed:
    # H[1] is h_(m-2,m), H[2] is h_(m-1,m), H[3] is h_(m,m), and H[4] is h_(m+1,m)
    q.H[2] = conj(q.lanczos.β_prev) # β_m
    q.H[3] = conj(q.lanczos.α) # α_m
    q.H[4] = q.lanczos.δ # δ_(m+1)

    # Rotation on H[1] and H[2]. Note that H[1] = 0 initially
    if iteration > 2
        q.H[1] = q.s_prev * q.H[2]
        q.H[2] = q.c_prev * q.H[2]
    end

    # Rotation on H[2] and H[3]
    if iteration > 1
        tmp = -conj(q.s_curr) * q.H[2] + q.c_curr * q.H[3]
        q.H[2] = q.c_curr * q.H[2] + q.s_curr * q.H[3]
        q.H[3] = tmp
    end

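    # `givensAlgorithm(f, g)` returns (c, s, r) with [c s; -conj(s) c] * [f; g] == [r; 0];
    # the same rotation is applied to the right-hand side `q.g` just below.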
    # Compute the coefficients of Ω_m and apply it
    c, s, q.H[3] = givensAlgorithm(q.H[3], q.H[4])

    # Apply Ω_m to the right-hand side
    q.g[2] = -conj(s) * q.g[1]
    q.g[1] = c * q.g[1]

    # H is now properly factorized
    # compute p_m
    # we re-use q.lanczos.v_next as workspace for p_m
    q.lanczos.v_next .= q.lanczos.v_prev # we need v_m, not v_(m+1)
    iteration > 1 && axpy!(-q.H[2], q.p_curr, q.lanczos.v_next)
    iteration > 2 && axpy!(-q.H[1], q.p_prev, q.lanczos.v_next)
    rmul!(q.lanczos.v_next, inv(q.H[3]))

    # update the iterate: x_m = x_(m-1) + γ_m * p_m
    axpy!(q.g[1], q.lanczos.v_next, q.x)

    # carry over the Givens rotation coefficients for the next iteration
    q.c_prev, q.s_prev, q.c_curr, q.s_curr = q.c_curr, q.s_curr, c, s
    q.p_prev .= q.p_curr
    q.p_curr .= q.lanczos.v_next
    q.g[1] = q.g[2]

    # approximate residual
    # See Proposition 7.3 of Saad
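    # |g[2]| is the residual norm of the small least-squares problem; the true
    # residual only satisfies ‖b - A*x_m‖ ≤ ‖V_(m+1)‖ * |g[2]|, so this is an
    # estimate rather than the exact residual norm.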
    q.resnorm = abs(q.g[2])

    q.resnorm, iteration + 1
end

"""
    qmr(A, b; kwargs...) -> x, [history]

Same as [`qmr!`](@ref), but allocates a solution vector `x` initialized with zeros.
"""
qmr(A, b; kwargs...) = qmr!(zerox(A, b), A, b; initially_zero = true, kwargs...)

"""
    qmr!(x, A, b; kwargs...) -> x, [history]

Solves the problem ``Ax = b`` with the Quasi-Minimal Residual (QMR) method.

# Arguments
- `x`: initial guess, will be updated in-place;
- `A`: linear operator;
- `b`: right-hand side.

## Keywords

- `initially_zero::Bool`: if `true`, assumes that `iszero(x)` so that one
  matrix-vector product can be saved when computing the initial residual
  vector;
- `maxiter::Int = size(A, 2)`: maximum number of iterations;
- `tol`: relative tolerance; the iteration stops when the estimated residual
  norm, relative to the norm of the initial residual, drops below `tol`;
- `lookahead::Bool = false`: reserved; look-ahead Lanczos is not implemented,
  so this keyword is currently ignored;
- `log::Bool`: keep track of the residual norm in each iteration;
- `verbose::Bool`: print convergence information during the iterations.

# Return values

**if `log` is `false`**

- `x`: approximate solution.

**if `log` is `true`**

- `x`: approximate solution;

- `history`: convergence history.

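# Examples

An illustrative sketch on a small random system (assumes `LinearAlgebra` is
available for `I` and `norm`):

```julia
using LinearAlgebra

A = rand(100, 100) + 10I   # shift the spectrum away from the origin
b = rand(100)

x, history = qmr(A, b, log = true)
norm(b - A * x)            # small if the iteration converged
```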
|
[^Saad2003]:
    Saad, Y. (2003). Iterative Methods for Sparse Linear Systems (2nd ed.). SIAM.
[^Freund1990]:
    Freund, R. W., & Nachtigal, N. M. (1990). QMR: a quasi-minimal residual
    method for non-Hermitian linear systems.
"""
function qmr!(x, A, b;
    tol = sqrt(eps(real(eltype(b)))),
    maxiter::Int = size(A, 2),
    lookahead::Bool = false,
    log::Bool = false,
    initially_zero::Bool = false,
    verbose::Bool = false
)
    history = ConvergenceHistory(partial = !log)
    history[:tol] = tol
    log && reserve!(history, :resnorm, maxiter)

    iterable = qmr_iterable!(x, A, b; tol = tol, maxiter = maxiter, initially_zero = initially_zero)

    verbose && @printf("=== qmr ===\n%4s\t%7s\n", "iter", "resnorm")

    for (iteration, residual) = enumerate(iterable)
        if log
            nextiter!(history)
            push!(history, :resnorm, residual)
        end

        verbose && @printf("%3d\t%1.2e\n", iteration, residual)
    end

    verbose && println()
    log && setconv(history, converged(iterable))
    log && shrink!(history)

    log ? (x, history) : x
end