Alpine3D  Alpine3D-3.2.0
MPIControl.h
1 /***********************************************************************************/
2 /* Copyright 2009-2015 WSL Institute for Snow and Avalanche Research SLF-DAVOS */
3 /***********************************************************************************/
4 /* This file is part of Alpine3D.
5  Alpine3D is free software: you can redistribute it and/or modify
6  it under the terms of the GNU Lesser General Public License as published by
7  the Free Software Foundation, either version 3 of the License, or
8  (at your option) any later version.
9 
10  Alpine3D is distributed in the hope that it will be useful,
11  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  GNU Lesser General Public License for more details.
14 
15  You should have received a copy of the GNU Lesser General Public License
16  along with Alpine3D. If not, see <http://www.gnu.org/licenses/>.
17 */
18 #ifndef MPIWRAPPER_H
19 #define MPIWRAPPER_H
20 
21 #ifdef ENABLE_MPI
22  #include <mpi.h>
23 #endif
24 
25 #ifdef _OPENMP
26  #include <omp.h>
27 #endif
28 
29 #include <meteoio/MeteoIO.h>
30 #include <cstdio>
31 
43 class MPIControl
44 {
45  public:
51  static MPIControl& instance();
52 
53  // The following methods refer to OpenMP
58  bool openmp() const;
59 
64  size_t thread() const;
65 
70  size_t max_threads() const;
71 
72  // The following methods refer to MPI
77  bool master() const;
78 
83  size_t master_rank() const;
84 
89  size_t rank() const;
90 
95  size_t size() const;
96 
101  std::string name() const;
102 
107  void barrier() const;
108 
116  void gather(const int& send_value, std::vector<int>& receive_vector, const size_t& root = 0);
117 
119 
126  void allreduce_max(double& value);
127  void allreduce_min(double& value);
128  void allreduce_sum(double& value);
129  void allreduce_sum(int& value);
131 
140  template<class T> static void deserialize(const void* in, const size_t& len, T& obj)
141  {
142  std::stringstream obj_stream;
143  obj_stream.write(reinterpret_cast<const char*>(in), len);
144  obj_stream >> obj;
145  }
146 
155  template<class T> static size_t serialize(void** out, const T& obj, const bool alloc=false)
156  {
157  std::stringstream obj_stream;
158  obj_stream << obj;
159  const size_t len = obj_stream.str().size();
160  if (alloc) {
161  *out = new char[len];
162  }
163  obj_stream.read(reinterpret_cast<char*>(*out), len);
164  return len;
165  }
166 
167  #ifdef ENABLE_MPI
168 
177  template <class T> static void op_sum_func(void* in, void* out, int* /*len*/, MPI_Datatype* datatype)
178  {
179  int tmp_size;
180  MPI_Type_size(*datatype, &tmp_size);
181 
182  T in_obj, out_obj;
183 
184  deserialize(in, static_cast<size_t>(tmp_size), in_obj);
185  deserialize(out, static_cast<size_t>(tmp_size), out_obj);
186 
187  out_obj += in_obj;
188 
189  serialize(&out, out_obj);
190  }
191  #endif
192 
193  #ifdef ENABLE_MPI
194 
199  template <class T> void allreduce_sum(T& obj)
200  {
201  if (size_ <= 1) return;
202 
203  MPI_Op op;
204  MPI_Op_create(op_sum_func<T>, true, &op);
205 
206  void* in_obj_char=NULL;
207  void* out_obj_char=NULL;
208 
209  const size_t len = serialize(&in_obj_char, obj, true);
210  out_obj_char = new char[len];
211 
212  MPI_Datatype chars_datatype;
213  MPI_Type_contiguous(static_cast<int>(len), MPI_CHAR, &chars_datatype);
214  MPI_Type_commit(&chars_datatype);
215 
216  MPI_Allreduce(in_obj_char, out_obj_char, 1, chars_datatype, op, MPI_COMM_WORLD);
217  MPI_Op_free(&op);
218 
219  deserialize(out_obj_char, len, obj);
220 
221  delete[] (char*)out_obj_char;
222  delete[] (char*)in_obj_char;
223  }
224  #else
225  template <class T> void allreduce_sum(T& /*obj*/) {}
226  #endif
227 
228  #ifdef ENABLE_MPI
229 
235  template <class T> void broadcast(T& obj, const size_t& root = 0)
236  {
237  if (size_ <= 1) return;
238 
239  std::string obj_string;
240 
241  if (rank_ == root) { // Build the string that represents the object
242  std::stringstream obj_stream;
243  obj_stream << obj;
244  obj_string.insert(0, obj_stream.str());
245  }
246 
247  broadcast(obj_string, root);
248 
249  if (rank_ != root) { // Deserialize the object broadcasted
250  std::stringstream obj_stream;
251  obj_stream << obj_string;
252  obj_stream >> obj;
253  }
254  }
255  #else
256  template <class T> void broadcast(T& /*obj*/, const size_t& root=0) {(void)root;}
257  #endif
258 
259  #ifdef ENABLE_MPI
260 
267  template <class T> void broadcast(std::vector<T>& vec_obj, const size_t& root = 0)
268  {
269  if (size_ <= 1) return;
270 
271  std::string obj_string;
272  size_t vec_size;
273 
274  if (rank_ == root) { // Build the string that represents the objects
275  std::stringstream objs_stream;
276  for (size_t ii=0; ii<vec_obj.size(); ii++)
277  objs_stream << vec_obj[ii];
278  obj_string.insert(0, objs_stream.str());
279  vec_size = vec_obj.size();
280  }
281 
282  broadcast(vec_size, root);
283  broadcast(obj_string, root);
284 
285  if (rank_ != root) { // Deserialize the objects broadcasted
286  vec_obj.clear();
287 
288  std::stringstream objs_stream;
289  objs_stream << obj_string;
290 
291  T tmp;
292  for (size_t ii=0; ii<vec_size; ii++) {
293  objs_stream >> tmp;
294  vec_obj.push_back(tmp);
295  }
296  }
297  }
298  #else
299  template <class T> void broadcast(std::vector<T>& /*vec_obj*/, const size_t& root=0) {(void)root;}
300  #endif
301 
302  #ifdef ENABLE_MPI
303 
313  template <class T> void scatter(std::vector<T*>& vec_local, const size_t& root = 0)
314  {
315  if (size_ <= 1) return;
316 
317  if (rank_ == root) {
318  for (size_t ii=1; ii<size_; ii++) { // HACK: Assuming root is master
319  size_t startx, deltax;
320  getArraySliceParams(vec_local.size(), ii, startx, deltax);
321 
322  send((int)deltax, ii);
323  for (size_t jj=startx; jj<(startx+deltax); jj++) {
324  std::string obj_string;
325  std::stringstream objs_stream;
326 
327  objs_stream << *(vec_local[jj]);
328  obj_string.insert(0, objs_stream.str());
329  send(obj_string, ii);
330  delete vec_local[jj];
331  }
332  }
333 
334  size_t startx, deltax;
335  getArraySliceParams(vec_local.size(), startx, deltax);
336  vec_local.resize(deltax);
337  } else {
338  std::string obj_string;
339  int deltax;
340  receive(deltax, root);
341  vec_local.resize((size_t)deltax);
342 
343  T obj; // temporary object
344  for (size_t ii=0; ii<vec_local.size(); ii++) {
345  std::stringstream objs_stream;
346  receive(obj_string, root);
347 
348  objs_stream << obj_string;
349 
350  objs_stream >> obj;
351  vec_local[ii] = new T(obj);
352  }
353  }
354  }
355  #else
356  template <class T> void scatter(std::vector<T*>& /*vec_local*/, const size_t& root=0) {(void)root;}
357  #endif
358 
359  #ifdef ENABLE_MPI
360 
367  template <class T> void send(const std::vector<T*>& vec_local, const size_t& destination, const int& tag=0);
368  template <class T> void send(const std::vector<T>& vec_local, const size_t& destination, const int& tag=0);
369  #else
370  template <class T> void send(const std::vector<T*>& /*vec_local*/, const size_t& /*destination*/, const int& tag=0) {(void)tag;}
371  template <class T> void send(const std::vector<T>& /*vec_local*/, const size_t& /*destination*/, const int& tag=0) {(void)tag;}
372  #endif
373 
374  #ifdef ENABLE_MPI
375 
382  template <class T> void receive(std::vector<T*>& vec_local, const size_t& source, const int& tag=0);
383  template <class T> void receive(std::vector<T>& vec_local, const size_t& source, const int& tag=0);
384  #else
385  template <class T> void receive(std::vector<T*>& /*vec_local*/, const size_t& /*source*/, const int& tag=0) {(void)tag;}
386  template <class T> void receive(std::vector<T>& /*vec_local*/, const size_t& /*source*/, const int& tag=0) {(void)tag;}
387  #endif
388 
389  #ifdef ENABLE_MPI
390 
400  template <class T> void gather(std::vector<T*>& vec_local, const size_t& root = 0)
401  {
402  if (size_ <= 1) return;
403 
404  std::vector<int> vec_sizes;
405  size_t sum = vec_local.size();
406 
407  gather(vec_local.size(), vec_sizes); //global offsets
408  allreduce_sum(sum); //global amount of T*
409 
410  if (rank_ == root) {
411  std::string obj_string;
412  size_t local_sum = vec_local.size();
413 
414  vec_local.resize(sum);
415 
416  T obj; //temporary object
417  for (size_t ii=1; ii<vec_sizes.size(); ii++) {//HACK: Assuming master is always rank 0
418  for (size_t jj=0; jj<vec_sizes[ii]; jj++) {
419  std::stringstream objs_stream;
420  receive(obj_string, ii);
421 
422  objs_stream << obj_string;
423  objs_stream >> obj;
424  vec_local[local_sum+jj] = new T(obj);
425  }
426  local_sum += vec_sizes[ii];
427  }
428 
429  } else {
430  for (size_t ii=0; ii<vec_local.size(); ii++) {
431  std::string obj_string;
432  std::stringstream objs_stream;
433 
434  objs_stream << *(vec_local[ii]);
435  obj_string.insert(0, objs_stream.str());
436  send(obj_string, root);
437  }
438  }
439  }
440  #else
441  template <class T> void gather(std::vector<T*>& /*vec_local*/, const size_t& root=0) {(void)root;}
442  #endif
443 
451  void getArraySliceParams(const size_t& dimx, size_t& startx_sub, size_t& nx_sub) const {getArraySliceParams(dimx, size_, rank_, startx_sub, nx_sub);}
452  void getArraySliceParams(const size_t& dimx, const size_t& idx_wk, size_t& startx_sub, size_t& nx_sub) const {getArraySliceParams(dimx, size_, idx_wk, startx_sub, nx_sub);}
453 
454  void getArraySliceParamsOptim(const size_t& dimx, size_t& startx_sub, size_t& nx_sub,const mio::DEMObject& dem, const mio::Grid2DObject& landuse)
455  {getArraySliceParamsOptim(dimx, rank_, startx_sub, nx_sub, dem, landuse);}
456 
457  void getArraySliceParamsOptim(const size_t& dimx, const size_t& idx_wk, size_t& startx_sub, size_t& nx_sub,const mio::DEMObject& dem, const mio::Grid2DObject& landuse);
458 
469  static void getArraySliceParams(const size_t& dimx, const size_t& nbworkers, const size_t& idx_wk, size_t& startx_sub, size_t& nx_sub);
470 
480  static void getArraySliceParams(const size_t& dimx, const size_t& nbworkers, std::vector<size_t>& offset, std::vector<size_t>& nx);
481 
482  private:
483  MPIControl();
484  ~MPIControl();
485 
486  MPIControl(MPIControl const&); //No implementation allowed - singleton class
487  void operator=(MPIControl const&); //No implementation allowed - singleton class
488  void broadcast(std::string& message, const size_t& root);
489 
490  void send(std::string& message, const size_t& recipient, const int& tag=0);
491  void receive(std::string& message, const size_t& source, const int& tag=0);
492  void send(const int& value, const size_t& recipient, const int& tag=0);
493  void receive(int& value, const size_t& source, const int& tag=0);
494 
495  static void checkSuccess(const int& ierr);
496 
497  size_t rank_; // the rank of this process
498  size_t size_; // the number of all processes
499  std::string name_; // the name of the node this process is running on
500 };
501 
502 #endif
void getArraySliceParams(const size_t &dimx, const size_t &idx_wk, size_t &startx_sub, size_t &nx_sub) const
Definition: MPIControl.h:452
void allreduce_sum(double &value)
Definition: MPIControl.cc:221
A singleton class that deals with all aspects of parallelization within Alpine3D (MPI, OpenMP, PETSc). Any class that wishes to use the MPI/OpenMP/PETSc capabilities should include this header and make sure that the respective cmake compilation options (called MPI, OPENMP, PETSC) are activated. The methods are designed transparently, also returning meaningful values if the compilation options were not activated: for example, bool master() returns true if MPI was not activated; if it was activated, it returns true solely for the master process and false on all other processes.
Definition: MPIControl.h:43
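For illustration only (this snippet is not part of the Alpine3D sources), a minimal sketch of how client code typically queries the singleton; it assumes that the first call to instance() takes care of any MPI setup internally:

#include <iostream>
#include "MPIControl.h"

int main() {
    MPIControl& mpi = MPIControl::instance(); //first call creates the singleton

    std::cout << "process " << mpi.rank() << " of " << mpi.size()
              << " on node " << mpi.name()
              << (mpi.master()? " (master)" : "") << std::endl;

    if (mpi.openmp())
        std::cout << "OpenMP active, up to " << mpi.max_threads() << " threads" << std::endl;

    mpi.barrier(); //synchronize all MPI processes
    return 0;
}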
bool openmp() const
Definition: MPIControl.cc:48
bool master() const
Definition: MPIControl.cc:95
size_t master_rank() const
Definition: MPIControl.cc:80
void broadcast(T &obj, const size_t &root=0)
Broadcast an object via MPI or in case MPI is not activated don't do anything.
Definition: MPIControl.h:235
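A hedged sketch of broadcasting a user-defined object ("Settings" and its values are invented for this example): the serialization shown above only requires that the type provides operator<< and operator>>, and that operator<< writes its own field separators:

#include <iostream>
#include "MPIControl.h"

struct Settings { //illustrative type, not part of Alpine3D
    double threshold;
    int iterations;
    Settings() : threshold(0.), iterations(0) {}
};
std::ostream& operator<<(std::ostream& os, const Settings& s) { return os << s.threshold << " " << s.iterations << " "; }
std::istream& operator>>(std::istream& is, Settings& s) { return is >> s.threshold >> s.iterations; }

int main() {
    MPIControl& mpi = MPIControl::instance();

    Settings cfg;
    if (mpi.master()) { //only the master knows the real values
        cfg.threshold = 2.5;
        cfg.iterations = 100;
    }

    mpi.broadcast(cfg); //every process now holds the master's cfg
    std::cout << "rank " << mpi.rank() << ": threshold=" << cfg.threshold << std::endl;
    return 0;
}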
void allreduce_sum(T &obj)
Adds up the values of type T from all processes and distributes the sum back to all processes...
Definition: MPIControl.h:199
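By way of example (the values are made up), the scalar overloads can be used directly; the template version above works analogously for any type that additionally provides operator+=, operator<< and operator>>:

#include <iostream>
#include "MPIControl.h"

int main() {
    MPIControl& mpi = MPIControl::instance();

    const double local_mass = 1.5*static_cast<double>(mpi.rank()+1); //per-process partial result

    double total = local_mass;
    mpi.allreduce_sum(total); //every process now holds the global sum

    double peak = local_mass;
    mpi.allreduce_max(peak); //global maximum, available on every process

    if (mpi.master())
        std::cout << "sum=" << total << " max=" << peak << std::endl;
    return 0;
}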
size_t rank() const
Definition: MPIControl.cc:75
static void op_sum_func(void *in, void *out, int *, MPI_Datatype *datatype)
Definition: MPIControl.h:177
void broadcast(std::vector< T > &vec_obj, const size_t &root=0)
Broadcast a vector of class T objects via MPI or in case MPI is not activated don't do anything...
Definition: MPIControl.h:267
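A sketch of the vector variant with an invented element type ("Obs"); since the elements are concatenated into one string without extra separators, operator<< must emit a trailing separator of its own:

#include <iostream>
#include <vector>
#include "MPIControl.h"

struct Obs { //illustrative type, not part of Alpine3D
    double value;
    Obs() : value(0.) {}
};
std::ostream& operator<<(std::ostream& os, const Obs& o) { return os << o.value << " "; } //trailing separator
std::istream& operator>>(std::istream& is, Obs& o) { return is >> o.value; }

int main() {
    MPIControl& mpi = MPIControl::instance();

    std::vector<Obs> vec;
    if (mpi.master()) { //only the master fills the vector
        for (size_t ii=0; ii<10; ii++) {
            Obs o;
            o.value = static_cast<double>(ii);
            vec.push_back(o);
        }
    }

    mpi.broadcast(vec); //all processes now hold the 10 elements
    std::cout << "rank " << mpi.rank() << " has " << vec.size() << " elements" << std::endl;
    return 0;
}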
void gather(const int &send_value, std::vector< int > &receive_vector, const size_t &root=0)
Definition: MPIControl.cc:195
void scatter(std::vector< T *> &vec_local, const size_t &root=0)
Scatter the objects pointed to by vector<T*> by slices to all processes. Internally no MPI_Scatterv is used; the serialized objects are sent slice by slice with point-to-point messages.
Definition: MPIControl.h:313
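As an illustration of the intended work flow (the "WorkItem" type and the sizes are invented for this sketch): the root allocates all items, scatter() leaves every process with just its own slice, and the pointers sent away are deleted on the root. The counterpart gather() collects processed slices back on the root node.

#include <iostream>
#include <vector>
#include "MPIControl.h"

struct WorkItem { //illustrative type, not part of Alpine3D
    double x;
    WorkItem() : x(0.) {}
};
std::ostream& operator<<(std::ostream& os, const WorkItem& w) { return os << w.x << " "; }
std::istream& operator>>(std::istream& is, WorkItem& w) { return is >> w.x; }

int main() {
    MPIControl& mpi = MPIControl::instance();

    std::vector<WorkItem*> items;
    if (mpi.master()) { //only the root needs the full data set
        for (size_t ii=0; ii<100; ii++) {
            WorkItem* w = new WorkItem();
            w->x = static_cast<double>(ii);
            items.push_back(w);
        }
    }

    mpi.scatter(items); //each process now owns only its slice
    std::cout << "rank " << mpi.rank() << " owns " << items.size() << " items" << std::endl;

    for (size_t ii=0; ii<items.size(); ii++) delete items[ii]; //free the local slice
    return 0;
}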
void allreduce_min(double &value)
Definition: MPIControl.cc:214
static void deserialize(const void *in, const size_t &len, T &obj)
Definition: MPIControl.h:140
void barrier() const
Definition: MPIControl.cc:235
void getArraySliceParams(const size_t &dimx, size_t &startx_sub, size_t &nx_sub) const
Helper to split an array for domain decomposition. Given the overall size of an array, calculate a start index and the number of elements making up the share of the current process.
Definition: MPIControl.h:451
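A short sketch of the typical domain decomposition use (the grid dimension is made up): each process asks for its own slice and restricts its computations to that index range; the static variant can compute the slice of an arbitrary worker, e.g. an OpenMP thread:

#include <iostream>
#include "MPIControl.h"

int main() {
    MPIControl& mpi = MPIControl::instance();

    const size_t dimx = 350; //overall number of rows/columns to split
    size_t startx, nx;
    mpi.getArraySliceParams(dimx, startx, nx); //slice of the calling process

    std::cout << "rank " << mpi.rank() << " handles [" << startx << ", " << startx+nx << ")" << std::endl;

    //slice of a given worker out of nbworkers, here worker mpi.thread() out of mpi.max_threads()
    size_t startx_thread, nx_thread;
    MPIControl::getArraySliceParams(dimx, mpi.max_threads(), mpi.thread(), startx_thread, nx_thread);
    return 0;
}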
void receive(std::vector< T *> &vec_local, const size_t &source, const int &tag=0)
Receive a vector of objects from process #source.
Definition: MPIControl.cc:310
static size_t serialize(void **out, const T &obj, const bool alloc=false)
Definition: MPIControl.h:155
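A small round-trip sketch for these static helpers (the value is arbitrary); with alloc=true, serialize() allocates the output buffer with new char[], so the caller must release it:

#include <iostream>
#include "MPIControl.h"

int main() {
    const double original = 42.25;

    void* buffer = NULL;
    const size_t len = MPIControl::serialize(&buffer, original, true); //allocates len bytes

    double copy = 0.;
    MPIControl::deserialize(buffer, len, copy);
    delete[] static_cast<char*>(buffer);

    std::cout << original << " -> " << copy << " (" << len << " bytes)" << std::endl;
    return 0;
}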
void gather(std::vector< T *> &vec_local, const size_t &root=0)
Gathers the objects pointed to by vector<T*> from all processes into a vector<T*> on the root node...
Definition: MPIControl.h:400
size_t max_threads() const
Definition: MPIControl.cc:66
static MPIControl & instance()
Definition: MPIControl.cc:405
void send(const std::vector< T *> &vec_local, const size_t &destination, const int &tag=0)
Send the objects pointed to by vector<T*> to process #destination.
Definition: MPIControl.cc:287
std::string name() const
Definition: MPIControl.cc:90
void getArraySliceParamsOptim(const size_t &dimx, size_t &startx_sub, size_t &nx_sub, const mio::DEMObject &dem, const mio::Grid2DObject &landuse)
Definition: MPIControl.h:454
void allreduce_max(double &value)
Definition: MPIControl.cc:207
size_t size() const
Definition: MPIControl.cc:85
size_t thread() const
Definition: MPIControl.cc:57