
Source Code for Package hedge.backends

  1  """Automated backend choosing.""" 
  2   
  3  __copyright__ = "Copyright (C) 2007 Andreas Kloeckner" 
  4   
  5  __license__ = """ 
  6  This program is free software: you can redistribute it and/or modify 
  7  it under the terms of the GNU General Public License as published by 
  8  the Free Software Foundation, either version 3 of the License, or 
  9  (at your option) any later version. 
 10   
 11  This program is distributed in the hope that it will be useful, 
 12  but WITHOUT ANY WARRANTY; without even the implied warranty of 
 13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
 14  GNU General Public License for more details. 
 15   
 16  You should have received a copy of the GNU General Public License 
 17  along with this program.  If not, see U{http://www.gnu.org/licenses/}. 
 18  """ 
 19   
 20   
 21   
 22   
 23  import hedge.discretization 
 24  import hedge.mesh 


class RunContext(object):
    @property
    def rank(self):
        raise NotImplementedError

    @property
    def ranks(self):
        raise NotImplementedError

    @property
    def head_rank(self):
        raise NotImplementedError

    @property
    def is_head_rank(self):
        return self.rank == self.head_rank

    def distribute_mesh(self, mesh, partition=None):
        """Take the Mesh instance `mesh' and distribute it according to `partition'.

        If `partition' is an integer, invoke PyMetis to partition the mesh
        into this many parts, distributing over the first `partition' ranks.

        If `partition' is None, act as if `partition' were the integer
        corresponding to the current number of ranks in the job.

        If `partition' is not an integer, it must be a mapping from element
        number to rank. (A list or tuple of rank numbers will do, for example,
        as will a full-blown dict.)

        Returns a mesh chunk.

        We deliberately do not define the term `mesh chunk'. The return value
        of this function is to be treated as opaque by the user, only to be
        used as an argument to L{make_discretization}().

        This routine may only be invoked on the head rank.
        """
        raise NotImplementedError

    def receive_mesh(self):
        """Wait for a mesh chunk to be sent by the head rank.

        We deliberately do not define the term `mesh chunk'. The return value
        of this function is to be treated as opaque by the user, only to be
        used as an argument to L{make_discretization}().

        This routine should only be invoked on non-head ranks.
        """
        raise NotImplementedError

    def make_discretization(self, mesh_data, *args, **kwargs):
        """Construct a Discretization instance.

        `mesh_data' is whatever gets returned from distribute_mesh() or
        receive_mesh(). Any extra arguments are forwarded directly to the
        respective Discretization constructor.
        """
        raise NotImplementedError
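
# Illustrative sketch (not part of the original source): the intended
# calling pattern for the RunContext API above. The head rank builds and
# distributes the mesh, the remaining ranks receive their (opaque) chunk,
# and every rank then constructs its local discretization. `make_my_mesh'
# is a hypothetical user-supplied mesh generator.
#
#   rcon = guess_run_context()
#   if rcon.is_head_rank:
#       mesh_data = rcon.distribute_mesh(make_my_mesh())
#   else:
#       mesh_data = rcon.receive_mesh()
#   discr = rcon.make_discretization(mesh_data, order=4)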


class SerialRunContext(RunContext):
    communicator = None

    def __init__(self, discr_class):
        self.discr_class = discr_class

    @property
    def rank(self):
        return 0

    @property
    def ranks(self):
        return [0]

    @property
    def head_rank(self):
        return 0

    def distribute_mesh(self, mesh, partition=None):
        return mesh

    def make_discretization(self, mesh_data, *args, **kwargs):
        return self.discr_class(mesh_data, *args, **kwargs)


FEAT_MPI = "mpi"
FEAT_CUDA = "cuda"


def generate_features(allowed_features):
    if FEAT_MPI in allowed_features:
        import pytools.prefork
        pytools.prefork.enable_prefork()

        try:
            import boostmpi.autoinit
        except ImportError:
            pass
        else:
            import boostmpi as mpi
            if mpi.size > 1:
                yield FEAT_MPI

    if FEAT_CUDA in allowed_features:
        try:
            import pycuda
        except ImportError:
            pass  # no pycuda, hence no usable CUDA
        else:
            import pycuda.driver
            try:
                if pycuda.driver.Device.count():
                    yield FEAT_CUDA
            except pycuda.driver.LogicError:
                # pycuda not initialized--we'll give it the benefit of the doubt.
                yield FEAT_CUDA
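
# For illustration (not part of the original source): generate_features()
# narrows the requested feature list down to those actually usable in the
# current process. On a CUDA-capable machine running a single-rank job,
#
#   list(generate_features([FEAT_MPI, FEAT_CUDA]))
#
# would yield ["cuda"] only, since FEAT_MPI additionally requires
# mpi.size > 1.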


def guess_run_context(allow=None):
    if allow is None:
        import sys

        i = 1
        while i < len(sys.argv):
            arg = sys.argv[i]
            if arg.startswith("--features="):
                allow = arg[arg.index("=")+1:].split(",")
                i += 1
            elif arg == "-f" and i+1 < len(sys.argv):
                allow = sys.argv[i+1].split(",")
                i += 2
            else:
                i += 1

        if allow is None:
            allow = []

    feat = list(generate_features(allow))

    if FEAT_CUDA in feat:
        from hedge.backends.cuda import Discretization as discr_class
    else:
        from hedge.backends.jit import Discretization as discr_class

    if FEAT_MPI in feat:
        from hedge.backends.mpi import MPIRunContext
        import boostmpi as mpi
        return MPIRunContext(mpi.world, discr_class)
    else:
        return SerialRunContext(discr_class)
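
# Illustration (not part of the original source): with the argv parsing
# above, a driver script can request features on its command line, e.g.
#
#   python driver.py --features=mpi,cuda
#   python driver.py -f cuda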
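
A minimal driver built on this module might look as follows. This is an
illustrative sketch, not part of the package source: make_box_mesh and its
max_volume argument are assumed to be importable from hedge.mesh, and any
other code producing a hedge.mesh.Mesh works in its place.

    from hedge.backends import guess_run_context
    from hedge.mesh import make_box_mesh   # assumed generator; any Mesh works

    rcon = guess_run_context()   # picks MPI/CUDA/serial based on availability

    if rcon.is_head_rank:
        # Only the head rank builds the full mesh; distribute_mesh() hands
        # out opaque chunks (in the serial case, the mesh itself).
        mesh_data = rcon.distribute_mesh(make_box_mesh(max_volume=0.01))
    else:
        # Non-head ranks block until their chunk arrives.
        mesh_data = rcon.receive_mesh()

    # Every rank builds its local Discretization from its mesh chunk; extra
    # arguments (here the polynomial order) go to the backend's constructor.
    discr = rcon.make_discretization(mesh_data, order=4)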