GAMBIT v1.5.0-2191-ga4742ac
a Global And Modular BSM Inference Tool
mpiwrapper.hpp
// GAMBIT: Global and Modular BSM Inference Tool
// *********************************************

// The WITH_MPI macro determines whether MPI is used.
// It is defined at compile time by cmake via -DWITH_MPI.
// The contents of this file are ignored if MPI is disabled.
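// (Illustrative note: a typical configure command would be something like
//  `cmake -DWITH_MPI=ON ..`; see the GAMBIT build documentation for the
//  authoritative flag names.)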

#ifdef WITH_MPI
#ifndef __mpiwrapper_hpp__
#define __mpiwrapper_hpp__

#include <sstream>
#include <iostream>
#include <type_traits>
#include <chrono>

#include <mpi.h>


#include <boost/utility/enable_if.hpp>


// I wanted to keep the GAMBIT logger separate from this code so that it
// would be easier to reuse elsewhere. But the logger is very useful for
// debugging, so this preprocessor flag can be used to turn it on and off.
// (It might already be included via the error handlers anyway.)
#define USE_GAMBIT_LOGGER

#ifdef USE_GAMBIT_LOGGER
  #include "gambit/Logs/logger.hpp"
  #define LOGGER logger() << LogTags::utils << LogTags::info
#else
  #define LOGGER std::cerr
  #define EOM std::endl
#endif
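// Illustrative usage of the LOGGER/EOM pair defined above (sketch only):
//   LOGGER << "some debug message" << EOM;
// With USE_GAMBIT_LOGGER defined this routes through the GAMBIT logger;
// otherwise it falls back to std::cerr / std::endl.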


#define SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(TYPEDEFD_TYPE, RETURN_MPI_TYPE) \
  template<typename T> \
  struct get_mpi_data_type<T, typename boost::enable_if_c< std::is_same<T, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<char, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<short, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<int, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<long, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<long long, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<unsigned char, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<unsigned short, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<unsigned int, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<unsigned long, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<unsigned long long, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<float, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<double, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<long double, TYPEDEFD_TYPE>::value && \
                                                           !std::is_same<bool, TYPEDEFD_TYPE>::value>::type > \
  { \
    static MPI_Datatype type() { return RETURN_MPI_TYPE; } \
  };

//#define MPI_MSG_DEBUG

namespace Gambit
{

  namespace GMPI
  {

    template<typename T, typename Enable=void>
    struct get_mpi_data_type;

    template<typename T, size_t SIZE>
    struct get_mpi_data_type<T[SIZE]> { static MPI_Datatype type() { return get_mpi_data_type<T>::type(); } };

    template<> struct get_mpi_data_type<char>              { static MPI_Datatype type() { return MPI_CHAR; } };
    template<> struct get_mpi_data_type<short>             { static MPI_Datatype type() { return MPI_SHORT; } };
    template<> struct get_mpi_data_type<int>               { static MPI_Datatype type() { return MPI_INT; } };
    template<> struct get_mpi_data_type<long>              { static MPI_Datatype type() { return MPI_LONG; } };
    template<> struct get_mpi_data_type<long long>         { static MPI_Datatype type() { return MPI_LONG_LONG; } };
    template<> struct get_mpi_data_type<unsigned char>     { static MPI_Datatype type() { return MPI_UNSIGNED_CHAR; } };
    template<> struct get_mpi_data_type<unsigned short>    { static MPI_Datatype type() { return MPI_UNSIGNED_SHORT; } };
    template<> struct get_mpi_data_type<unsigned int>      { static MPI_Datatype type() { return MPI_UNSIGNED; } };
    template<> struct get_mpi_data_type<unsigned long>     { static MPI_Datatype type() { return MPI_UNSIGNED_LONG; } };
    template<> struct get_mpi_data_type<unsigned long long>{ static MPI_Datatype type() { return MPI_UNSIGNED_LONG_LONG; } };
    template<> struct get_mpi_data_type<float>             { static MPI_Datatype type() { return MPI_FLOAT; } };
    template<> struct get_mpi_data_type<double>            { static MPI_Datatype type() { return MPI_DOUBLE; } };
    template<> struct get_mpi_data_type<long double>       { static MPI_Datatype type() { return MPI_LONG_DOUBLE; } };

    #ifdef MPI_INT8_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(int8_t,   MPI_INT8_T  )
    #endif
    #ifdef MPI_UINT8_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(uint8_t,  MPI_UINT8_T )
    #endif
    #ifdef MPI_INT16_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(int16_t,  MPI_INT16_T )
    #endif
    #ifdef MPI_UINT16_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(uint16_t, MPI_UINT16_T)
    #endif
    #ifdef MPI_INT32_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(int32_t,  MPI_INT32_T )
    #endif
    #ifdef MPI_UINT32_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(uint32_t, MPI_UINT32_T)
    #endif
    #ifdef MPI_INT64_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(int64_t,  MPI_INT64_T )
    #endif
    #ifdef MPI_UINT64_T
      SPECIALISE_MPI_DATA_TYPE_IF_NEEDED(uint64_t, MPI_UINT64_T)
    #endif
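    // Illustrative resolution examples (sketch only, not part of the interface):
    //   get_mpi_data_type<double>::type()  -> MPI_DOUBLE  (explicit specialisation above)
    //   get_mpi_data_type<int32_t>::type() -> MPI_INT32_T, but only when int32_t is not
    //   already a typedef of one of the built-in types listed in the SFINAE guard of
    //   SPECIALISE_MPI_DATA_TYPE_IF_NEEDED; otherwise the built-in specialisation applies.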

    class EXPORT_SYMBOLS Comm
    {
      public:
        Comm();

        Comm(const MPI_Comm& comm, const std::string& name);

        Comm(std::vector<int> processes, const std::string& name);

        ~Comm();

        Comm spawn_new(const std::vector<int>& processes, const std::string& name);

        void check_for_undelivered_messages();

        void dup(const MPI_Comm& comm, const std::string& newname);

        int Get_size() const;

        int Get_rank() const;

        std::string Get_name() const;

        void Barrier()
        {
          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Barrier() called"<<std::endl;
          #endif

          int errflag;
          errflag = MPI_Barrier(boundcomm);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Barrier! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }

          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Barrier() passed"<<std::endl;
          #endif
        }

        void Recv(void *buf /*out*/, int count, MPI_Datatype datatype,
                  int source, int tag,
                  MPI_Status *in_status=NULL /*out*/)
        {
          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Recv() called (count="<<count<<", source="<<source<<", tag="<<tag<<")"<<std::endl;
          #endif
          int errflag;
          errflag = MPI_Recv(buf, count, datatype, source, tag, boundcomm, in_status == NULL ? MPI_STATUS_IGNORE : in_status);
          if(errflag!=0)
          {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Recv! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Recv() finished "<<std::endl;
          #endif
        }

        template<class T>
        void Recv(T *buf /*out*/, int count,
                  int source, int tag,
                  MPI_Status *status=NULL /*out*/)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();
          Recv(buf, count, datatype, source, tag, status);
        }

        void Send(void *buf, int count, MPI_Datatype datatype,
                  int destination, int tag)
        {
          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Send() called (count="<<count<<", destination="<<destination<<", tag="<<tag<<")"<<std::endl;
          #endif
          int errflag;
          errflag = MPI_Send(buf, count, datatype, destination, tag, boundcomm);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Send! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Send() finished"<<std::endl;
          #endif
        }

        void Ssend(void *buf, int count, MPI_Datatype datatype,
                   int destination, int tag)
        {
          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Ssend() called (count="<<count<<", destination="<<destination<<", tag="<<tag<<")"<<std::endl;
          #endif
          int errflag;
          errflag = MPI_Ssend(buf, count, datatype, destination, tag, boundcomm);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Ssend! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
          #ifdef MPI_MSG_DEBUG
          std::cout<<"rank "<<Get_rank()<<": Ssend() finished"<<std::endl;
          #endif
        }

        template<class T>
        void Send(T *buf, int count,
                  int destination, int tag)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();
          Send(buf, count, datatype, destination, tag);
        }

        template<class T>
        void Ssend(T *buf, int count,
                   int destination, int tag)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();
          Ssend(buf, count, datatype, destination, tag);
        }

        void Isend(void *buf, int count, MPI_Datatype datatype,
                   int destination, int tag,
                   MPI_Request *request /*out*/)
        {
          #ifdef MPI_MSG_DEBUG
          std::cerr<<"rank "<<Get_rank()<<": Isend() called (count="<<count<<", destination="<<destination<<", tag="<<tag<<")"<<std::endl;
          #endif
          int errflag;
          errflag = MPI_Isend(buf, count, datatype, destination, tag, boundcomm, request);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Isend! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
        }

        template<class T>
        void Isend(T *buf, int count,
                   int destination, int tag,
                   MPI_Request *request /*out*/)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();
          Isend(buf, count, datatype, destination, tag, request);
        }

        void Wait(MPI_Request *request)
        {
          MPI_Status status;
          int errflag = MPI_Wait(request, &status);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Wait! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
        }

        // Non-blocking probe for messages waiting to be delivered
        bool Iprobe(int source, int tag, MPI_Status* in_status=NULL /*out*/)
        {
          //#ifdef MPI_MSG_DEBUG
          //std::cout<<"rank "<<Get_rank()<<": Iprobe() called (source="<<source<<", tag="<<tag<<")"<<std::endl;
          //#endif
          int errflag;
          int you_have_mail; // the MPI C interface uses int flags rather than bool
          MPI_Status def_status;
          MPI_Status* status;
          if(in_status!=NULL) {
            status = in_status;
          } else {
            status = &def_status;
          }
          //MPI_Iprobe(source, 1, boundcomm, &you_have_mail, status);
          errflag = MPI_Iprobe(source, tag, boundcomm, &you_have_mail, status);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Iprobe! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
          #ifdef MPI_MSG_DEBUG
          if(you_have_mail!=0) {
            std::cerr<<"rank "<<Get_rank()<<": Iprobe: Message waiting from process "<<status->MPI_SOURCE<<std::endl;
          }
          #endif
          return (you_have_mail != 0);
        }
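        // Illustrative polling pattern (sketch only; 'comm' is a GMPI::Comm instance),
        // mirroring how Iprobe is used in Recv_all further below:
        //   MPI_Status status;
        //   while(comm.Iprobe(source, tag, &status))
        //   {
        //     // a matching message is waiting; receive it via Recv(...)
        //   }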

        // Blocking probe for a message. Doesn't return until matching message found.
        // No point having default NULL status this time, because the only reason to
        // use this function is to inspect the message status.
        void Probe(int source, int tag, MPI_Status* status)
        {
          int errflag;
          errflag = MPI_Probe(source, tag, boundcomm, status);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Probe! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
        }

        // Perform an Isend to all other processes
        // (using templated non-blocking send repeatedly)
        template<class T>
        void IsendToAll(T *buf, int count, int tag,
                        MPI_Request *in_req=NULL /*out*/)
        {
          MPI_Request def_req;
          MPI_Request* req;
          if(in_req!=NULL) {
            req = in_req;
          } else {
            req = &def_req;
          }
          int rank = Get_rank();
          int size = Get_size();
          for(int i=0; i<size; i++)
          {
            if(i!=rank) Isend(buf, count, i, tag, req);
          }
        }
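        // Illustrative call (sketch only; 'comm' is a GMPI::Comm instance and the tag is arbitrary):
        //   int msg = 1;
        //   MPI_Request req;
        //   comm.IsendToAll(&msg, 1, /*tag=*/5, &req);
        //   comm.Wait(&req);  // note: this waits only on the request from the last Isend issued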

        template <typename T>
        void Bcast (std::vector<T>& buffer, int count, int root)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();

          MPI_Bcast (&buffer[0], count, datatype, root, boundcomm);
        }

        template<typename T>
        void Scatter (std::vector<T> &sendbuf, T &recvbuf, int root)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();

          MPI_Scatter (&sendbuf[0], 1, datatype, &recvbuf, 1, datatype, root, boundcomm);
        }

        template<typename T>
        void Allreduce (T &sendbuf, T &recvbuf, MPI_Op op)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();

          MPI_Allreduce (&sendbuf, &recvbuf, 1, datatype, op, boundcomm);
        }

        template<typename T>
        void Gather(std::vector<T> &sendbuf, std::vector<T> &recvbuf, int root)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();

          int sendcount = sendbuf.size();
          int recvcount = sendbuf.size();
          //std::cerr<<"rank "<<Get_rank()<<": Gather pars: sendcount="<<sendcount<<std::endl;
          if(Get_rank()==0)
          {
            if(recvbuf.size()<sendbuf.size()*Get_size())
            {
              std::ostringstream errmsg;
              errmsg << "Error performing Gather! Recv buffer is not big enough to fit the expected data! We expect "<<Get_size()<<" messages of count "<<recvcount<<", (total size="<<recvcount*Get_size()<<") but the recv buffer only has size "<<recvbuf.size()<<"!";
              utils_error().raise(LOCAL_INFO, errmsg.str());
            }
          }
          int errflag = MPI_Gather(&sendbuf[0], sendcount, datatype,
                                   &recvbuf[0], recvcount, datatype,
                                   root, boundcomm);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing Gather! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
        }

        template<typename T>
        void Gatherv(std::vector<T> &sendbuf, std::vector<T> &recvbuf, std::vector<int> recvcounts, int root)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();

          //std::cerr<<"rank "<<Get_rank()<<": Gatherv pars: recvcounts="<<recvcounts<<std::endl;

          // We will automatically calculate the displacements assuming that the incoming
          // data should just be stacked in the order of the process ranks
          std::vector<int> displs;
          displs.push_back(0);
          std::size_t totalsize = 0;
          for(auto it=recvcounts.begin(); it!=recvcounts.end(); ++it)
          {
            if(std::next(it)!=recvcounts.end()) displs.push_back(*it + displs.back());
            totalsize += (*it);
          }
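          // Worked example (illustrative only): recvcounts = {2, 3, 1}
          //   -> displs = {0, 2, 5} and totalsize = 6,
          // i.e. each rank's block is placed directly after the previous rank's block.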
          if(Get_rank()==0)
          {
            if(recvbuf.size()<totalsize)
            {
              std::ostringstream errmsg;
              errmsg << "Error performing Gatherv! Recv buffer is not big enough to fit the expected data! We expect messages with total size "<<totalsize<<" but the recv buffer only has size "<<recvbuf.size()<<"!";
              utils_error().raise(LOCAL_INFO, errmsg.str());
            }
          }
          //std::cerr<<"rank "<<Get_rank()<<": sendbuf.size()="<<sendbuf.size()<<", recvbuf.size()="<<recvbuf.size()<<", "<<"recvcounts="<<recvcounts<<", displs="<<displs<<std::endl;
          int sendcount = sendbuf.size();
          int errflag = MPI_Gatherv(&sendbuf[0], sendcount, datatype,
                                    &recvbuf[0], &recvcounts[0], &displs[0],
                                    datatype, root, boundcomm);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Gatherv! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
        }

        template<typename T>
        void AllGather(std::vector<T> &sendbuf, std::vector<T> &recvbuf)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();

          int sendcount = sendbuf.size();
          int recvcount = sendbuf.size();

          if(recvbuf.size()<sendbuf.size()*Get_size())
          {
            std::ostringstream errmsg;
            errmsg << "Error performing AllGather! Recv buffer is not big enough to fit the expected data! We expect "<<Get_size()<<" messages of count "<<recvcount<<", (total size="<<recvcount*Get_size()<<") but the recv buffer only has size "<<recvbuf.size()<<"!";
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }

          int errflag = MPI_Allgather(&sendbuf[0], sendcount, datatype,
                                      &recvbuf[0], recvcount, datatype, boundcomm);

          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Allgather! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
        }


        template<typename T>
        void AllGatherv(std::vector<T> &sendbuf, std::vector<T> &recvbuf, std::vector<int> recvcounts)
        {
          static const MPI_Datatype datatype = get_mpi_data_type<T>::type();

          // We will automatically calculate the displacements assuming that the incoming
          // data should just be stacked in the order of the process ranks
          // (cumulative sums of recvcounts, as in Gatherv above)
          std::vector<int> displs;
          displs.push_back(0);
          for(std::size_t i=0; i+1<recvcounts.size(); i++)
          {
            displs.push_back(displs.back() + recvcounts[i]);
          }

          int sendcount = sendbuf.size();
          int errflag = MPI_Allgatherv(&sendbuf[0], sendcount, datatype,
                                       &recvbuf[0], &recvcounts[0], &displs[0],
                                       datatype, boundcomm);
          if(errflag!=0) {
            std::ostringstream errmsg;
            errmsg << "Error performing MPI_Allgatherv! Received error flag: "<<errflag;
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }
        }

        // Force all processes in this group (possibly all processes in
        // the "WORLD"; implementation dependent) to stop executing.
        // Useful for abnormal termination (since if one process throws
        // an exception then the others can easily get stuck waiting
        // for messages that will never arrive).
        void Abort()
        {
          std::cerr << "rank "<<Get_rank()<<": Issuing MPI_Abort command, attempting to terminate all processes..." << std::endl;
          MPI_Abort(boundcomm, 1);
        }

        void masterWaitForAll(int tag);

        void allWaitForMaster(int tag);

        void allWaitForMasterWithFunc(int tag, void (*func)());

        bool BarrierWithTimeout(const std::chrono::duration<double> timeout, const int tag);

        bool BarrierWithCommonTimeout(std::chrono::duration<double> timeout,
                                      const int tag_entered,
                                      const int tag_timeleft);

        void check_for_unreceived_messages(int timeout);

        template<class T>
        void Recv_all(T* buffer, int size, int source, int tag, int max_loops)
        {
          int loop = 0;

          MPI_Status status;
          while(loop<max_loops and Iprobe(source, tag, &status))
          {
            #ifdef SIGNAL_DEBUG
            LOGGER << "Detected message from process "<<status.MPI_SOURCE<<" with tag "<<status.MPI_TAG<<"; doing Recv" << EOM;
            #endif
            MPI_Status recv_status;
            Recv(buffer, size, status.MPI_SOURCE, status.MPI_TAG, &recv_status);
            #ifdef SIGNAL_DEBUG
            LOGGER << "Received message from process "<<status.MPI_SOURCE<<" with tag "<<status.MPI_TAG<<". Discarding any existing message in the output buffer as obsolete..." << EOM;
            #endif
            ++loop;
          }

          if(loop==max_loops)
          {
            std::ostringstream errmsg;
            errmsg << "Error while attempting to clean out unreceived messages from other processes! Received maximum allowed number of messages ("<<loop<<", note that MPI size is "<<Get_size()<<")";
            utils_error().raise(LOCAL_INFO, errmsg.str());
          }

          if(loop>0) LOGGER << "Communicator '"<<myname<<"' received "<<loop<<" messages with tag "<<tag<<". Only the last of these will be readable from the output buffer, the rest were discarded."<<EOM;
        }

        int mytag = 1;

        MPI_Comm* get_boundcomm() { return &boundcomm; }

        long int MasterPID();

        void set_MasterPID(long int p);

      private:

        MPI_Comm boundcomm;

        std::string myname;

        static long int pid;
    };

    EXPORT_SYMBOLS bool Is_initialized();

    EXPORT_SYMBOLS void Init();

    EXPORT_SYMBOLS bool Is_finalized();

    EXPORT_SYMBOLS void Finalize();

    EXPORT_SYMBOLS bool PrepareForFinalizeWithTimeout(bool use_mpi_abort);

    template<class T>
    int Get_count(MPI_Status *status)
    {
      static const MPI_Datatype datatype = get_mpi_data_type<T>::type();
      int msgsize;
      MPI_Get_count(status, datatype, &msgsize);
      if(msgsize<0)
      {
        std::ostringstream errmsg;
        errmsg << "Error performing MPI_Get_count! Message size returned negative (value was "<<msgsize<<")! This can happen if the number of bytes received is not a multiple of the size of the specified MPI_Datatype. In other words you may have specified a type that doesn't match the type of the sent message; please double-check this.";
        utils_error().raise(LOCAL_INFO, errmsg.str());
      }
      return msgsize;
    }
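    // Illustrative use of Get_count together with Probe (sketch only; 'comm' is a
    // hypothetical GMPI::Comm instance and 'tag' a user-chosen message tag):
    //   MPI_Status status;
    //   comm.Probe(MPI_ANY_SOURCE, tag, &status);
    //   int n = Get_count<double>(&status);
    //   std::vector<double> buf(n);
    //   comm.Recv(&buf[0], n, status.MPI_SOURCE, status.MPI_TAG);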


    class MpiIniFunc
    {
      private:
        std::string location;
        std::string name;
        void (*func)();
      public:
        MpiIniFunc(const std::string& l, const std::string& n, void(*f)())
          : location(l)
          , name(n)
          , func(f)
        {}
        void runme()
        {
          (*func)();
        }
        std::string mylocation(){return location;}
        std::string myname    (){return name;}
    };

    // struct AddMpiIniFunc {
    //   AddMpiIniFunc(const std::string& local_info, const std::string& name, void(*func)());
    // };


  }
}


#endif // include guard
#endif // MPI

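Example usage (a minimal sketch, not part of the header above): the snippet assumes GAMBIT was configured with MPI support (e.g. -DWITH_MPI=ON) and that the wrapper is included via its usual Utils include path; the communicator name "example" and the message tag are arbitrary choices.

#include <vector>
#include "gambit/Utils/mpiwrapper.hpp"  // assumed include path

int main()
{
  Gambit::GMPI::Init();  // wraps MPI initialisation

  Gambit::GMPI::Comm comm(MPI_COMM_WORLD, "example");
  int rank = comm.Get_rank();
  int size = comm.Get_size();

  // Rank 0 sends one double to every other rank; the others receive it.
  double payload = 3.14;
  if(rank == 0)
  {
    for(int r = 1; r < size; ++r) comm.Send(&payload, 1, r, /*tag=*/10);
  }
  else
  {
    comm.Recv(&payload, 1, /*source=*/0, /*tag=*/10);
  }

  comm.Barrier();
  Gambit::GMPI::Finalize();  // wraps MPI finalisation
  return 0;
}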