
Source Code for Package hedge.timestep.multirate_ab

# -*- coding: utf8 -*-

"""Multirate-AB ODE solver."""

from __future__ import division

__copyright__ = "Copyright (C) 2007 Andreas Kloeckner"

__license__ = """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see U{http://www.gnu.org/licenses/}.
"""


import numpy
import numpy.linalg as la
from pytools import memoize, memoize_method
from hedge.timestep.base import TimeStepper
from hedge.timestep.rk4 import RK4TimeStepper
from hedge.timestep.ab import \
        make_generic_ab_coefficients, \
        make_ab_coefficients
from hedge.timestep.multirate_ab.methods import \
        HIST_NAMES
from hedge.timestep.multirate_ab.processors import \
        MRABProcessor


# helper functions ------------------------------------------------------------
def _linear_comb(coefficients, vectors):
    from operator import add
    return reduce(add,
            (coeff * v for coeff, v in
                zip(coefficients, vectors)))

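The helper above just forms the weighted sum sum_k coefficients[k]*vectors[k] of the history entries; note that the bare reduce it uses is the Python 2 builtin (functools.reduce in Python 3). A minimal sketch of the equivalence, with made-up weights and plain numpy arrays:

    import numpy

    coefficients = [0.5, -0.25]
    vectors = [numpy.array([1.0, 2.0]), numpy.array([4.0, 0.0])]

    # _linear_comb(coefficients, vectors) is equivalent to
    #     0.5*vectors[0] + (-0.25)*vectors[1]
    # which here evaluates to array([-0.5,  1. ]).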


class TwoRateAdamsBashforthTimeStepper(TimeStepper):
    """Simultaneously timesteps two parts of an ODE system,
    the first with a small timestep, the second with a large timestep.

    [1] C.W. Gear and D.R. Wells, "Multirate linear multistep methods,"
    BIT Numerical Mathematics, vol. 24, Dec. 1984, pp. 484-502.
    """

    def __init__(self, method, large_dt, substep_count, order,
            order_f2f=None, order_s2f=None,
            order_f2s=None, order_s2s=None,
            startup_stepper=None):

        if isinstance(method, str):
            from hedge.timestep.multirate_ab.methods import methods
            method = methods[method]
        self.method = method

        self.large_dt = large_dt
        self.small_dt = large_dt/substep_count
        self.substep_count = substep_count

        from hedge.timestep.multirate_ab.methods import \
                HIST_F2F, HIST_S2F, HIST_F2S, HIST_S2S

        self.orders = {
                HIST_F2F: order_f2f,
                HIST_S2F: order_s2f,
                HIST_F2S: order_f2s,
                HIST_S2S: order_s2s,
                }

        for hn in HIST_NAMES:
            if self.orders[hn] is None:
                self.orders[hn] = order

        self.max_order = max(self.orders.values())

        # histories of rhs evaluations
        self.histories = dict((hn, []) for hn in HIST_NAMES)

        if startup_stepper is not None:
            self.startup_stepper = startup_stepper
        else:
            self.startup_stepper = RK4TimeStepper()

        self.startup_history = []

        self.hist_is_fast = {
                HIST_F2F: True,
                HIST_S2F: self.method.s2f_hist_is_fast,
                HIST_S2S: False,
                HIST_F2S: False
                }

    def __call__(self, ys, t, rhss):
        """
        @arg rhss: Matrix of right-hand sides, stored in row-major order, i.e.
          C{[f2f, s2f, f2s, s2s]}.
        """
        from hedge.tools import make_obj_array

        def finish_startup():
            # we're done starting up, pack data into split histories
            for i, hn in enumerate(HIST_NAMES):
                hist = self.startup_history
                if not self.hist_is_fast[hn]:
                    hist = hist[::self.substep_count]

                hist = hist[:self.orders[hn]]

                self.histories[hn] = [
                        hist_entry[i] for hist_entry in hist]

                assert len(self.histories[hn]) == self.orders[hn]

            # here's some memory we won't need any more
            self.startup_stepper = None
            del self.startup_history

        def combined_rhs(t, y):
            y_fast, y_slow = y
            return make_obj_array([
                rhs(t, lambda: y_fast, lambda: y_slow)
                for rhs in rhss])

        def combined_summed_rhs(t, y):
            return numpy.sum(combined_rhs(t, y).reshape((2, 2), order="C"), axis=1)

        if self.startup_stepper is not None:
            ys = make_obj_array(ys)

            if self.max_order == 1:
                # we're running forward Euler, no need for the startup stepper

                assert not self.startup_history
                self.startup_history.append(combined_rhs(t, ys))
                finish_startup()
                return self.run_ab(ys, t, rhss)

            for i in range(self.substep_count):
                ys = self.startup_stepper(ys, t+i*self.small_dt, self.small_dt,
                        combined_summed_rhs)
                self.startup_history.insert(0, combined_rhs(t+(i+1)*self.small_dt, ys))

            if len(self.startup_history) == self.max_order*self.substep_count:
                finish_startup()

            return ys
        else:
            return self.run_ab(ys, t, rhss)

    def run_ab(self, ys, t, rhss):
        step_evaluator = _MRABEvaluator(self, ys, t, rhss)
        step_evaluator.run()
        return step_evaluator.get_result()

    @memoize_method
    def get_coefficients(self,
            for_fast_history, hist_head_time_level,
            start_level, end_level, order):

        history_times = numpy.arange(0, -order, -1,
                dtype=numpy.float64)

        if for_fast_history:
            history_times /= self.substep_count

        history_times += hist_head_time_level/self.substep_count

        t_start = start_level/self.substep_count
        t_end = end_level/self.substep_count

        return make_generic_ab_coefficients(
                history_times, t_start, t_end)

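All time levels handed to make_generic_ab_coefficients are expressed in units of the large timestep: substep levels are divided by substep_count, fast histories hold entries one substep apart, and slow histories hold entries one large step apart. As a worked sketch of the history_times computation (the parameter values below are made up for illustration):

    from __future__ import division
    import numpy

    substep_count = 2
    order = 3
    hist_head_time_level = 1      # head of a *fast* history, in substeps

    # mirrors get_coefficients for a fast history
    history_times = numpy.arange(0, -order, -1, dtype=numpy.float64)
    history_times /= substep_count                       # entries one small_dt apart
    history_times += hist_head_time_level/substep_count
    # history_times is now [0.5, 0.0, -0.5], in units of large_dt

    # integrating from substep level 1 to level 2 corresponds to
    t_start, t_end = 1/substep_count, 2/substep_count    # 0.5 and 1.0

make_generic_ab_coefficients is handed these times and, as used in integrate_in_time below, returns the weights that _linear_comb applies to the corresponding history entries (scaled by large_dt) to advance the state from t_start to t_end.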


class _MRABEvaluator(MRABProcessor):
    def __init__(self, stepper, y, t, rhss):
        MRABProcessor.__init__(self, stepper.method, stepper.substep_count)

        self.stepper = stepper

        self.t_start = t

        self.context = {}
        self.var_time_level = {}

        self.rhss = rhss

        y_fast, y_slow = y
        from hedge.timestep.multirate_ab.methods import CO_FAST, CO_SLOW
        self.last_y = {CO_FAST: y_fast, CO_SLOW: y_slow}

        self.hist_head_time_level = dict((hn, 0) for hn in HIST_NAMES)

    def integrate_in_time(self, insn):
        from hedge.timestep.multirate_ab.methods import CO_FAST, CO_SLOW
        from hedge.timestep.multirate_ab.methods import \
                HIST_F2F, HIST_S2F, HIST_F2S, HIST_S2S

        if insn.component == CO_FAST:
            self_hn, cross_hn = HIST_F2F, HIST_S2F
        else:
            self_hn, cross_hn = HIST_S2S, HIST_F2S

        start_time_level = self.eval_expr(insn.start)
        end_time_level = self.eval_expr(insn.end)

        self_coefficients = self.stepper.get_coefficients(
                self.stepper.hist_is_fast[self_hn],
                self.hist_head_time_level[self_hn],
                start_time_level, end_time_level,
                self.stepper.orders[self_hn])
        cross_coefficients = self.stepper.get_coefficients(
                self.stepper.hist_is_fast[cross_hn],
                self.hist_head_time_level[cross_hn],
                start_time_level, end_time_level,
                self.stepper.orders[cross_hn])

        if start_time_level == 0 or (insn.result_name not in self.context):
            my_y = self.last_y[insn.component]
            assert start_time_level == 0
        else:
            my_y = self.context[insn.result_name]()
            assert start_time_level == \
                    self.var_time_level[insn.result_name]

        hists = self.stepper.histories
        self_history = hists[self_hn][:]
        cross_history = hists[cross_hn][:]
        if False:
            my_integrated_y = memoize(
                    lambda: my_y + self.stepper.large_dt * (
                        _linear_comb(self_coefficients, self_history)
                        + _linear_comb(cross_coefficients, cross_history)))
        else:
            my_new_y = my_y + self.stepper.large_dt * (
                    _linear_comb(self_coefficients, self_history)
                    + _linear_comb(cross_coefficients, cross_history))
            my_integrated_y = lambda: my_new_y

        self.context[insn.result_name] = my_integrated_y
        self.var_time_level[insn.result_name] = end_time_level

        MRABProcessor.integrate_in_time(self, insn)

    def history_update(self, insn):
        time_slow = self.var_time_level[insn.slow_arg]
        time_fast = self.var_time_level[insn.fast_arg]

        assert time_slow == time_fast

        t = (self.t_start
                + self.stepper.large_dt*time_slow/self.stepper.substep_count)

        rhs = self.rhss[HIST_NAMES.index(insn.which)]

        hist = self.stepper.histories[insn.which]
        hist.pop()
        hist.insert(0,
                rhs(t,
                    self.context[insn.fast_arg],
                    self.context[insn.slow_arg]))

        if self.stepper.hist_is_fast[insn.which]:
            self.hist_head_time_level[insn.which] += 1
        else:
            self.hist_head_time_level[insn.which] += self.stepper.substep_count

        MRABProcessor.history_update(self, insn)

    def get_result(self):
        meth = self.stepper.method

        assert self.var_time_level[meth.result_slow] == self.stepper.substep_count
        assert self.var_time_level[meth.result_fast] == self.stepper.substep_count

        for hn in HIST_NAMES:
            assert self.hist_head_time_level[hn] == self.stepper.substep_count

        return (self.context[meth.result_fast](),
                self.context[meth.result_slow]())
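For completeness, a minimal usage sketch follows. It is not taken from the hedge documentation: the method name is a placeholder that must be a key of hedge.timestep.multirate_ab.methods.methods, and the toy right-hand sides are invented. Each right-hand side receives the time plus zero-argument callables returning the current fast and slow states (matching how combined_rhs and history_update invoke them), and the four callables are passed in the row-major order [f2f, s2f, f2s, s2s] documented on __call__.

    import numpy
    from hedge.timestep.multirate_ab import TwoRateAdamsBashforthTimeStepper

    # toy right-hand sides; u_fast/u_slow are zero-argument callables
    def f2f(t, u_fast, u_slow): return -u_fast()
    def s2f(t, u_fast, u_slow): return 0.1*u_slow()
    def f2s(t, u_fast, u_slow): return 0.1*u_fast()
    def s2s(t, u_fast, u_slow): return -0.01*u_slow()

    rhss = [f2f, s2f, f2s, s2s]

    stepper = TwoRateAdamsBashforthTimeStepper(
            "fastest_first_1a",      # placeholder; must name an entry of methods.methods
            large_dt=0.02, substep_count=5, order=3)

    y = (numpy.array([1.0]), numpy.array([1.0]))    # (fast, slow) initial state
    t = 0.0
    for step in range(100):
        y = stepper(y, t, rhss)      # returns the updated (fast, slow) pair
        t += stepper.large_dt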