Excerpts from MPIControl.h (lines elided from the original listing are marked with "// ..."):

#include <meteoio/MeteoIO.h>

std::string name() const;

void gather(const int& send_value, std::vector<int>& receive_vector, const size_t& root = 0);
template <class T> static void deserialize(const void* in, const size_t& len, T& obj)
{
    std::stringstream obj_stream;
    obj_stream.write(reinterpret_cast<const char*>(in), len);
    // ... obj is then read back out of obj_stream (line elided)
}
template <class T> static size_t serialize(void** out, const T& obj, const bool alloc = false)
{
    std::stringstream obj_stream;
    // ... obj is streamed into obj_stream (line elided)
    const size_t len = obj_stream.str().size();
    // ... if requested via alloc, the output buffer is allocated (line elided):
    *out = new char[len];
    obj_stream.read(reinterpret_cast<char*>(*out), len);
    // ...
    return len;  // elided in the excerpt: the serialized length is returned
}
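Both helpers rely on the objects being streamable as text: the broadcast/scatter/gather code below writes objects with operator<< and reads them back with operator>>. A minimal sketch of a type that would satisfy this (the name SimpleState and its text layout are illustrative assumptions, not part of MPIControl):

#include <iostream>

// Hypothetical example type: exchangeable through MPIControl because it
// provides textual stream operators (illustration only).
class SimpleState {
    public:
        SimpleState() : temperature(0.), count(0) {}
        double temperature;
        int count;

        friend std::ostream& operator<<(std::ostream& os, const SimpleState& s) {
            os << s.temperature << " " << s.count << " ";
            return os;
        }
        friend std::istream& operator>>(std::istream& is, SimpleState& s) {
            is >> s.temperature >> s.count;
            return is;
        }
};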
template <class T> static void op_sum_func(void* in, void* out, int* /*len*/, MPI_Datatype* datatype)
{
    int tmp_size;
    MPI_Type_size(*datatype, &tmp_size);

    T in_obj, out_obj;  // (declarations elided in the excerpt)
    deserialize(in, static_cast<size_t>(tmp_size), in_obj);
    deserialize(out, static_cast<size_t>(tmp_size), out_obj);
    // ... the two objects are summed and the result serialized back into out (lines elided)
}
// allreduce_sum(): serialize the local object, reduce the byte buffers with the
// user-defined operation op_sum_func<T>, then rebuild the summed object.
template <class T> void allreduce_sum(T& obj)
{
    if (size_ <= 1) return;

    MPI_Op op;  // (declaration elided in the excerpt)
    MPI_Op_create(op_sum_func<T>, true, &op);

    void* in_obj_char = NULL;
    void* out_obj_char = NULL;

    const size_t len = serialize(&in_obj_char, obj, true);
    out_obj_char = new char[len];

    MPI_Datatype chars_datatype;
    MPI_Type_contiguous(static_cast<int>(len), MPI_CHAR, &chars_datatype);
    MPI_Type_commit(&chars_datatype);

    MPI_Allreduce(in_obj_char, out_obj_char, 1, chars_datatype, op, MPI_COMM_WORLD);
    // ... the reduced buffer is deserialized back into obj (lines elided)

    delete[] (char*)out_obj_char;
    delete[] (char*)in_obj_char;
}
template <class T> void broadcast(T& obj, const size_t& root = 0)
{
    if (size_ <= 1) return;

    std::string obj_string;

    // On the root process (branch elided): obj is written into obj_string
    {
        std::stringstream obj_stream;
        // ... obj is streamed into obj_stream
        obj_string.insert(0, obj_stream.str());
    }
    // ... obj_string is broadcast to all processes (call elided) ...
    // On the non-root processes (branch elided): obj is rebuilt from the string
    {
        std::stringstream obj_stream;
        obj_stream << obj_string;
        // ... obj is streamed back out of obj_stream
    }
}
// Without MPI, broadcast() is a no-op:
template <class T> void broadcast(T& /*obj*/, const size_t& root = 0) {(void)root;}
template <class T> void broadcast(std::vector<T>& vec_obj, const size_t& root = 0)
{
    if (size_ <= 1) return;

    std::string obj_string;

    // On the root process (branch elided): all elements are serialized into one string
    {
        std::stringstream objs_stream;
        for (size_t ii=0; ii<vec_obj.size(); ii++)
            objs_stream << vec_obj[ii];
        obj_string.insert(0, objs_stream.str());
        vec_size = vec_obj.size();  // vec_size: declaration elided in the excerpt
    }
    // ... vec_size and obj_string are broadcast (calls elided) ...
    // On the non-root processes (branch elided): the vector is rebuilt element by element
    {
        std::stringstream objs_stream;
        objs_stream << obj_string;

        for (size_t ii=0; ii<vec_size; ii++) {
            // ... an element tmp is streamed out of objs_stream (lines elided)
            vec_obj.push_back(tmp);
        }
    }
}
// Without MPI, the vector broadcast is a no-op:
template <class T> void broadcast(std::vector<T>& /*vec_obj*/, const size_t& root = 0) {(void)root;}
template <class T> void scatter(std::vector<T*>& vec_local, const size_t& root = 0)
{
    if (size_ <= 1) return;

    // On the root process (branch elided): send each worker its slice, then
    // shrink the local vector to the root's own slice
    {
        for (size_t ii=1; ii<size_; ii++) {
            size_t startx, deltax;
            // ... startx and deltax for worker ii are computed (call elided) ...
            send((int)deltax, ii);
            for (size_t jj=startx; jj<(startx+deltax); jj++) {
                std::string obj_string;
                std::stringstream objs_stream;
                objs_stream << *(vec_local[jj]);
                obj_string.insert(0, objs_stream.str());
                send(obj_string, ii);
                delete vec_local[jj];
            }
        }
        size_t startx, deltax;
        // ... the root's own slice is computed (call elided) ...
        vec_local.resize(deltax);
    }
    // On the other processes (branch elided): receive the slice size, then the objects
    {
        std::string obj_string;
        // ... deltax is received from root (lines elided) ...
        vec_local.resize((size_t)deltax);

        for (size_t ii=0; ii<vec_local.size(); ii++) {
            std::stringstream objs_stream;
            // ... obj_string is received and an object obj is read from it (lines elided) ...
            objs_stream << obj_string;
            vec_local[ii] = new T(obj);
        }
    }
}
// Without MPI, scatter() is a no-op:
template <class T> void scatter(std::vector<T*>& /*vec_local*/, const size_t& root = 0) {(void)root;}
// MPI builds: declarations (defined in MPIControl.cc)
template <class T> void send(const std::vector<T*>& vec_local, const size_t& destination, const int& tag = 0);
template <class T> void send(const std::vector<T>& vec_local, const size_t& destination, const int& tag = 0);

// Without MPI, send() is a no-op:
template <class T> void send(const std::vector<T*>& /*vec_local*/, const size_t& /*destination*/, const int& tag = 0) {(void)tag;}
template <class T> void send(const std::vector<T>& /*vec_local*/, const size_t& /*destination*/, const int& tag = 0) {(void)tag;}
// MPI builds: declarations (defined in MPIControl.cc)
template <class T> void receive(std::vector<T*>& vec_local, const size_t& source, const int& tag = 0);
template <class T> void receive(std::vector<T>& vec_local, const size_t& source, const int& tag = 0);

// Without MPI, receive() is a no-op:
template <class T> void receive(std::vector<T*>& /*vec_local*/, const size_t& /*source*/, const int& tag = 0) {(void)tag;}
template <class T> void receive(std::vector<T>& /*vec_local*/, const size_t& /*source*/, const int& tag = 0) {(void)tag;}
template <class T> void gather(std::vector<T*>& vec_local, const size_t& root = 0)
{
    if (size_ <= 1) return;

    std::vector<int> vec_sizes;
    const size_t sum = vec_local.size();

    gather(vec_local.size(), vec_sizes);
    // ... (lines elided) ...

    // On the root process (branch elided): receive and rebuild the remote objects
    {
        std::string obj_string;
        size_t local_sum = vec_local.size();

        vec_local.resize(sum);

        for (size_t ii=1; ii<vec_sizes.size(); ii++) {
            for (size_t jj=0; jj<vec_sizes[ii]; jj++) {
                std::stringstream objs_stream;
                // ... obj_string is received from process ii and an object obj is read from it ...
                objs_stream << obj_string;
                vec_local[local_sum+jj] = new T(obj);
            }
            local_sum += vec_sizes[ii];
        }
    }
    // On the other processes (branch elided): serialize and send the local objects to root
    {
        for (size_t ii=0; ii<vec_local.size(); ii++) {
            std::string obj_string;
            std::stringstream objs_stream;
            objs_stream << *(vec_local[ii]);
            obj_string.insert(0, objs_stream.str());
            send(obj_string, root);
        }
    }
}
// Without MPI, gather() is a no-op:
template <class T> void gather(std::vector<T*>& /*vec_local*/, const size_t& root = 0) {(void)root;}
void getArraySliceParamsOptim(const size_t& dimx, size_t& startx_sub, size_t& nx_sub,
                              const mio::DEMObject& dem, const mio::Grid2DObject& landuse);
void getArraySliceParamsOptim(const size_t& dimx, const size_t& idx_wk, size_t& startx_sub, size_t& nx_sub,
                              const mio::DEMObject& dem, const mio::Grid2DObject& landuse);

static void getArraySliceParams(const size_t& dimx, const size_t& nbworkers, const size_t& idx_wk,
                                size_t& startx_sub, size_t& nx_sub);
static void getArraySliceParams(const size_t& dimx, const size_t& nbworkers,
                                std::vector<size_t>& offset, std::vector<size_t>& nx);
void broadcast(std::string& message, const size_t& root);

void send(std::string& message, const size_t& recipient, const int& tag = 0);
void receive(std::string& message, const size_t& source, const int& tag = 0);
void send(const int& value, const size_t& recipient, const int& tag = 0);
void receive(int& value, const size_t& source, const int& tag = 0);

static void checkSuccess(const int& ierr);
void getArraySliceParams(const size_t &dimx, const size_t &idx_wk, size_t &startx_sub, size_t &nx_sub) const
Definition: MPIControl.h:452
void allreduce_sum(double &value)
Definition: MPIControl.cc:221
A singleton class that deals with all aspects of parallelization within Alpine3D (MPI, OpenMP, PETSc). Any class that wishes to use the MPI/OpenMP/PETSc capabilities should include this header and make sure that the respective cmake compilation options (called MPI, OPENMP, PETSC) are activated. The methods are designed to be transparent and also return meaningful values when these compilation options are not activated, e.g. bool master() returns true if MPI was not activated; if it was activated, it returns true solely on the master process and false on all other processes.
Definition: MPIControl.h:43
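A minimal usage sketch (the include path is an assumption, and MPI initialization is assumed to be handled by the singleton itself, in line with the class description above):

#include <iostream>
#include <alpine3d/MPIControl.h>

int main(int /*argc*/, char** /*argv*/)
{
    MPIControl& mpi = MPIControl::instance();  // singleton access

    if (mpi.master())
        std::cout << "Running on " << mpi.size() << " process(es), master is rank "
                  << mpi.master_rank() << "\n";

    std::cout << "Hello from rank " << mpi.rank() << " on host " << mpi.name()
              << " with " << mpi.max_threads() << " OpenMP thread(s)\n";

    mpi.barrier();  // synchronize all processes before continuing
    return 0;
}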
bool openmp() const
Definition: MPIControl.cc:48
bool master() const
Definition: MPIControl.cc:95
size_t master_rank() const
Definition: MPIControl.cc:80
void broadcast(T &obj, const size_t &root=0)
Broadcast an object via MPI, or do nothing if MPI is not activated.
Definition: MPIControl.h:235
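For example, a value known only on the master after reading the configuration can be distributed to every process (sketch; the variable and its value are illustrative):

MPIControl& mpi = MPIControl::instance();

double threshold = 0.;
if (mpi.master())
    threshold = 2.5;        // e.g. parsed from the ini file on the master only

mpi.broadcast(threshold);   // root defaults to 0; afterwards every rank holds 2.5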
void allreduce_sum(T &obj)
Adds up the values of type T from all processes and distributes the sum back to all processes...
Definition: MPIControl.h:199
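For instance, per-process partial results can be combined into a global total that every rank then holds (sketch; computeLocalMass() is a hypothetical helper):

MPIControl& mpi = MPIControl::instance();

double local_mass = computeLocalMass();  // hypothetical per-process contribution
mpi.allreduce_sum(local_mass);           // afterwards local_mass is the global sum on every rank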
size_t rank() const
Definition: MPIControl.cc:75
static void op_sum_func(void *in, void *out, int *, MPI_Datatype *datatype)
Definition: MPIControl.h:177
void broadcast(std::vector< T > &vec_obj, const size_t &root=0)
Broadcast a vector of class T objects via MPI, or do nothing if MPI is not activated.
Definition: MPIControl.h:267
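A sketch of distributing a whole vector from the master (element values are illustrative):

MPIControl& mpi = MPIControl::instance();

std::vector<double> thresholds;
if (mpi.master()) {
    thresholds.push_back(0.5);
    thresholds.push_back(1.0);
    thresholds.push_back(2.5);
}

mpi.broadcast(thresholds);   // non-root processes now hold the same three values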
void gather(const int &send_value, std::vector< int > &receive_vector, const size_t &root=0)
Definition: MPIControl.cc:195
void scatter(std::vector< T *> &vec_local, const size_t &root=0)
Scatter the objects pointed to by vector<T*> in slices to all processes. Internally no MPI_Scatterv ...
Definition: MPIControl.h:313
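A sketch of splitting such a vector of pointers among the processes (Cell is a hypothetical type with stream operators, comparable to the SimpleState sketch above):

MPIControl& mpi = MPIControl::instance();

std::vector<Cell*> cells;
if (mpi.master()) {
    for (size_t ii=0; ii<1000; ii++)
        cells.push_back(new Cell());  // the master owns the full set
}

mpi.scatter(cells);  // afterwards each process holds (and owns) only its own slice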
void allreduce_min(double &value)
Definition: MPIControl.cc:214
static void deserialize(const void *in, const size_t &len, T &obj)
Definition: MPIControl.h:140
void barrier() const
Definition: MPIControl.cc:235
void getArraySliceParams(const size_t &dimx, size_t &startx_sub, size_t &nx_sub) const
Helper to split an array for domain decomposition. Given the overall size of an array, calculate a start index and a number of elements for the calling process.
Definition: MPIControl.h:451
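For example, to split the columns of a grid among the processes (sketch; the dimension is illustrative):

MPIControl& mpi = MPIControl::instance();

const size_t dimx = 480;   // e.g. number of grid columns
size_t startx = 0, nx = 0;
mpi.getArraySliceParams(dimx, startx, nx);  // slice for the calling process

for (size_t ix=startx; ix<startx+nx; ix++) {
    // ... work on column ix ...
}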
void receive(std::vector< T *> &vec_local, const size_t &source, const int &tag=0)
Receive a vector of objects from process #source.
Definition: MPIControl.cc:310
static size_t serialize(void **out, const T &obj, const bool alloc=false)
Definition: MPIControl.h:155
void gather(std::vector< T *> &vec_local, const size_t &root=0)
Gathers the objects pointed to by vector<T*> from all processes into a vector<T*> on the root node...
Definition: MPIControl.h:400
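Used as the counterpart of scatter(): after each process has updated its slice, the root collects the full set again (sketch, same hypothetical Cell type as above):

mpi.scatter(cells);   // distribute the pointers (see scatter() above)

for (size_t ii=0; ii<cells.size(); ii++) {
    // ... update *cells[ii] locally ...
}

mpi.gather(cells);    // on the root, cells again holds the objects from all processes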
size_t max_threads() const
Definition: MPIControl.cc:66
static MPIControl & instance()
Definition: MPIControl.cc:405
void send(const std::vector< T *> &vec_local, const size_t &destination, const int &tag=0)
Send the objects pointed to by vector<T*> to process #destination.
Definition: MPIControl.cc:287
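Point-to-point transfers use the same text serialization; a sketch of a manual exchange between the master and rank 1 (payload and tag are illustrative, Cell is the hypothetical type from above):

MPIControl& mpi = MPIControl::instance();

std::vector<Cell> buffer;
if (mpi.master()) {
    buffer.resize(10);       // some payload prepared on the master
    mpi.send(buffer, 1);     // send to rank 1 with the default tag
} else if (mpi.rank() == 1) {
    mpi.receive(buffer, 0);  // receive from the master
}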
std::string name() const
Definition: MPIControl.cc:90
void getArraySliceParamsOptim(const size_t &dimx, size_t &startx_sub, size_t &nx_sub, const mio::DEMObject &dem, const mio::Grid2DObject &landuse)
Definition: MPIControl.h:454
void allreduce_max(double &value)
Definition: MPIControl.cc:207
size_t size() const
Definition: MPIControl.cc:85
size_t thread() const
Definition: MPIControl.cc:57