
Source Code for Module hedge.iterative

  1  """Iterative solution of linear systems of equations.""" 
  2   
  3  from __future__ import division 
  4   
  5  __copyright__ = "Copyright (C) 2007 Andreas Kloeckner" 
  6   
  7  __license__ = """ 
  8  This program is free software: you can redistribute it and/or modify 
  9  it under the terms of the GNU General Public License as published by 
 10  the Free Software Foundation, either version 3 of the License, or 
 11  (at your option) any later version. 
 12   
 13  This program is distributed in the hope that it will be useful, 
 14  but WITHOUT ANY WARRANTY; without even the implied warranty of 
 15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
 16  GNU General Public License for more details. 
 17   
 18  You should have received a copy of the GNU General Public License 
 19  along with this program.  If not, see U{http://www.gnu.org/licenses/}. 
 20  """ 
 21   
 22   
 23   
 24   
 25   
 26  import numpy 
 27  import numpy.linalg as la 
28 29 30 31 32 -class OperatorBase(object):
33 @property
34 - def dtype(self):
35 raise NotImplementedError
36 37 @property
38 - def shape(self):
39 raise NotImplementedError
40
41 - def __neg__(self):
42 return NegOperator(self)
43
44 -class NegOperator(OperatorBase):
45 - def __init__(self, sub_op):
46 self.sub_op = sub_op
47 48 @property
49 - def dtype(self):
50 return self.sub_op.dtype
51 52 @property
53 - def shape(self):
54 return self.sub_op.shape
55
56 - def __call__(self, operand):
57 return -self.sub_op(operand)
58
59 -class IdentityOperator(OperatorBase):
60 - def __init__(self, dtype, n):
61 self.my_dtype = dtype 62 self.n = n
63 64 @property
65 - def dtype(self):
66 return self.my_dtype
67 68 @property
69 - def shape(self):
70 return self.n, self.n
71
72 - def __call__(self, operand):
73 return operand
74
75 76 77 78 -class DiagonalPreconditioner(OperatorBase):
79 - def __init__(self, diagonal):
80 self.diagonal = diagonal
81 82 @property
83 - def dtype(self):
84 return self.diagonal.dtype
85 86 @property
87 - def shape(self):
88 n = self.diagonal.shape[0] 89 return n, n
90
91 - def __call__(self, operand):
92 return self.diagonal*operand
93
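
The operator classes above only pin down an interface: anything exposing dtype, shape, and a __call__ that applies the operator to a vector can serve as the system operator or preconditioner for the conjugate gradient driver below. As an illustration only (the class is not part of this module and its name is made up for the example), a dense numpy array could be wrapped like this:

    from hedge.iterative import OperatorBase
    import numpy

    class MatrixOperator(OperatorBase):
        """Hypothetical example operator: apply a dense numpy array."""

        def __init__(self, mat):
            self.mat = numpy.asarray(mat)

        @property
        def dtype(self):
            return self.mat.dtype

        @property
        def shape(self):
            return self.mat.shape

        def __call__(self, operand):
            # matrix-vector product A*x
            return numpy.dot(self.mat, operand)
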
class ConvergenceError(RuntimeError):
    pass


class CGStateContainer:
    def __init__(self, operator, precon=None, dot=None):
        if precon is None:
            precon = IdentityOperator(operator.dtype, operator.shape[0])

        self.operator = operator
        self.precon = precon

        if dot is None:
            dot = numpy.dot

        # inner product used throughout; conjugating the second argument
        # makes this a proper inner product for complex-valued operands
        def inner(a, b):
            return dot(a, b.conj())

        self.inner = inner

    def reset(self, rhs, x=None):
        self.rhs = rhs

        if x is None:
            x = numpy.zeros((self.operator.shape[0],))
        self.x = x

        # residual of the initial guess and preconditioned search direction
        ax = self.operator(x)
        self.residual = rhs - ax

        self.d = self.precon(self.residual)

        self.delta = self.inner(self.residual, self.d)
        return self.delta

    def one_iteration(self, compute_real_residual=False):
        # typed up from J.R. Shewchuk,
        # An Introduction to the Conjugate Gradient Method
        # Without the Agonizing Pain, Edition 1 1/4 [8/1994]
        # Appendix B3

        q = self.operator(self.d)
        myip = self.inner(self.d, q)
        alpha = self.delta / myip

        self.x += alpha * self.d

        if compute_real_residual:
            self.residual = self.rhs - self.operator(self.x)
        else:
            self.residual -= alpha*q

        s = self.precon(self.residual)
        delta_old = self.delta
        self.delta = self.inner(self.residual, s)

        beta = self.delta / delta_old
        self.d = s + beta * self.d

        return self.delta

    def run(self, max_iterations=None, tol=1e-7, debug_callback=None, debug=0):
        if max_iterations is None:
            max_iterations = 10 * self.operator.shape[0]

        if self.inner(self.rhs, self.rhs) == 0:
            return self.rhs

        iterations = 0
        delta_0 = delta = self.delta
        while iterations < max_iterations:
            # recompute the true residual periodically (and before accepting
            # convergence) to guard against drift in the recurrence
            compute_real_residual = \
                    iterations % 50 == 0 or \
                    abs(delta) < tol*tol * abs(delta_0)

            delta = self.one_iteration(
                    compute_real_residual=compute_real_residual)

            if debug_callback is not None:
                if compute_real_residual:
                    what = "it+residual"
                else:
                    what = "it"

                debug_callback(what, iterations, self.x,
                        self.residual, self.d, delta)

            if compute_real_residual and abs(delta) < tol*tol * abs(delta_0):
                if debug_callback is not None:
                    debug_callback("end", iterations, self.x, self.residual, self.d, delta)
                if debug:
                    print "%d iterations" % iterations
                return self.x

            if debug and iterations % debug == 0:
                print "debug: delta=%g" % delta
            iterations += 1

        raise ConvergenceError("cg failed to converge")
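
For a sense of the calling sequence, here is a usage sketch that is not part of the module: it assumes hedge.iterative is importable and reuses the hypothetical MatrixOperator wrapper sketched above to solve a small symmetric positive definite system, then checks the true residual.

    import numpy
    import numpy.linalg as la
    from hedge.iterative import CGStateContainer, ConvergenceError

    # small symmetric positive definite test system A x = b (made-up data)
    n = 50
    numpy.random.seed(1)
    m = numpy.random.randn(n, n)
    a = numpy.dot(m, m.T) + n*numpy.eye(n)
    b = numpy.random.randn(n)

    cg = CGStateContainer(MatrixOperator(a))  # hypothetical wrapper from above
    cg.reset(b)
    try:
        x = cg.run(tol=1e-8)
        print(la.norm(numpy.dot(a, x) - b))   # true residual norm, should be small
    except ConvergenceError:
        print("CG did not converge within the iteration limit")
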
def parallel_cg(pcon, operator, b, precon=None, x=None, tol=1e-7, max_iterations=None,
        debug=False, debug_callback=None, dot=None):
    if x is None:
        x = numpy.zeros((operator.shape[1],))

    cg = CGStateContainer(operator, precon, dot=dot)
    cg.reset(b, x)

    # suppress debug output on non-head ranks
    if not pcon.is_head_rank:
        debug = False

    return cg.run(max_iterations, tol, debug_callback, debug)
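
Two remarks on the driver, based only on what the code above uses: pcon is consulted solely for its is_head_rank attribute, so debug printing happens on the head rank only, and debug_callback (both here and in CGStateContainer.run) is called as debug_callback(what, iteration, x, residual, d, delta) with what being "it", "it+residual", or "end". A minimal sketch of a callback that records the convergence history (the names are made up for the example):

    history = []

    def record_history(what, iteration, x, residual, d, delta):
        # one entry per CG iteration; "end" marks the accepted solution
        history.append((what, iteration, abs(delta)))

    # e.g., continuing the sketch above:
    #     cg.run(debug_callback=record_history)
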