13 static MPI_Datatype type() {
return MPI_BYTE; }
18 static MPI_Datatype type() {
return MPI_DOUBLE;}
23 static MPI_Datatype type() {
return MPI_INT;}
28 static MPI_Datatype type() {
return MPI_UNSIGNED_LONG;}
/**
 * @brief Detection trait: true when T exposes a callable .data() member
 *        (e.g. std::vector, std::array, std::string).
 *
 * Primary template — selected when the .data() expression is ill-formed.
 * NOTE(review): the primary-template body and both closing braces were lost
 * in this extracted view; restored here as the standard std::void_t
 * detection idiom implied by the visible specialization.
 */
template <typename T, typename SFINAE = void>
struct has_data : std::false_type {
};

/// Specialization selected when std::declval<T>().data() is well-formed.
template <typename T>
struct has_data<T, std::void_t<decltype(std::declval<T>().data())>> : std::true_type {
};
/**
 * @brief Detection trait: true when T exposes a callable .size() member
 *        (e.g. std::vector, std::array, std::string).
 *
 * Primary template — selected when the .size() expression is ill-formed.
 * NOTE(review): the primary-template body and both closing braces were lost
 * in this extracted view; restored here as the standard std::void_t
 * detection idiom implied by the visible specialization.
 */
template <typename T, typename SFINAE = void>
struct has_size : std::false_type {
};

/// Specialization selected when std::declval<T>().size() is well-formed.
template <typename T>
struct has_size<T, std::void_t<decltype(std::declval<T>().size())>> : std::true_type {
};
50 int getRank(MPI_Comm comm = MPI_COMM_WORLD)
53 MPI_Comm_rank(comm, &rank);
61 MPI_Comm_size(comm, &nranks);
76 T& local, T& global, MPI_Op operation, MPI_Comm comm = MPI_COMM_WORLD)
92 T& local, T& global, MPI_Op operation, MPI_Comm comm = MPI_COMM_WORLD)
// NOTE(review): incomplete extraction fragment — template header and
// parameter tail of one Broadcast overload (root defaults to rank 0). The
// function name, enable_if return type, and MPI_Bcast body are not visible
// in this chunk, so the code is left untouched. TODO confirm against the
// full header.
105 template <
typename T>
107 T& buf,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD)
// NOTE(review): incomplete extraction fragment — template header and
// parameter tail of the second Broadcast overload (presumably the
// single-element version documented in this file's summaries: "Broadcast a
// single element to all ranks on the communicator"). Return type and body
// are not visible; left untouched. TODO confirm against the full header.
119 template <
typename T>
121 T& buf,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD)
// NOTE(review): incomplete extraction fragment — signature plus part of the
// MPI_Allgatherv argument list. The opening brace, send-side arguments, and
// datatype/trailing arguments of the call are not visible in this chunk, so
// the code is left untouched. Gathers local collections from all ranks onto
// all ranks, per this file's summaries. TODO confirm against the full header.
136 template <
typename T>
137 int Allgatherv(T& buf, T& values_on_rank, std::vector<int>& size_on_rank, std::vector<int>& offsets_on_rank,
138 MPI_Comm comm = MPI_COMM_WORLD)
141 values_on_rank.data(), size_on_rank.data(), offsets_on_rank.data(),
// NOTE(review): incomplete extraction fragment — signature plus part of the
// MPI_Gatherv argument list. The opening brace, send-side arguments, and
// datatype/root/trailing arguments of the call are not visible in this
// chunk, so the code is left untouched. Gathers local collections from all
// ranks onto the root rank only, per this file's summaries. TODO confirm
// against the full header.
156 template <
typename T>
157 int Gatherv(T& buf, T& values_on_rank, std::vector<int>& size_on_rank, std::vector<int>& offsets_on_rank,
int root = 0,
158 MPI_Comm comm = MPI_COMM_WORLD)
161 values_on_rank.data(), size_on_rank.data(), offsets_on_rank.data(),
// NOTE(review): incomplete extraction fragment — Scatterv signature, its
// precondition check, and the start of the MPI_Scatterv call; the recv-side
// arguments and closing of the call are not visible in this chunk, so the
// code is left untouched. TODO confirm against the full header.
174 template <
typename T>
175 int Scatterv(T& sendbuf, std::vector<int>& variables_per_rank, std::vector<int>& offsets, T& recvbuff,
int root = 0,
176 MPI_Comm comm = MPI_COMM_WORLD)
// Precondition: recvbuff must already be sized to this rank's element count,
// i.e. variables_per_rank[getRank(comm)] == recvbuff.size().
179 assert(static_cast<typename T::size_type>(
180 [&]() {
return variables_per_rank[
static_cast<std::size_t
>(
mpi::getRank(comm))]; }()) == recvbuff.size());
182 return MPI_Scatterv(sendbuf.data(), variables_per_rank.data(), offsets.data(),
// NOTE(review): incomplete extraction fragment — Irecv signature pieces and
// the tail of the MPI_Irecv call (non-blocking receive from send_rank,
// completion tracked via *request, per this file's summaries). The opening
// brace and the call's buffer/count/datatype arguments are not visible, so
// the code is left untouched. TODO confirm against the full header.
196 template <
typename T>
197 int Irecv(T& buf,
int send_rank, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD)
200 send_rank, tag, comm, request);
// NOTE(review): incomplete extraction fragment — Isend signature pieces and
// the tail of the MPI_Isend call (non-blocking send to recv_rank, completion
// tracked via *request, per this file's summaries). The opening brace and
// the call's buffer/count/datatype arguments are not visible, so the code is
// left untouched. TODO confirm against the full header.
213 template <
typename T>
214 int Isend(T& buf,
int recv_rank, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD)
217 recv_rank, tag, comm, request);
227 int Waitall(std::vector<MPI_Request>& requests, std::vector<MPI_Status>& status)
229 return MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());
232 int CreateAndSetErrorHandler(MPI_Errhandler& newerr,
void (*err)(MPI_Comm* comm,
int* err, ...),
233 MPI_Comm comm = MPI_COMM_WORLD)
235 MPI_Comm_create_errhandler(err, &newerr);
236 return MPI_Comm_set_errhandler(comm, newerr);
int getRank(MPI_Comm comm=MPI_COMM_WORLD)
Get rank.
int Waitall(std::vector< MPI_Request > &requests, std::vector< MPI_Status > &status)
A wrapper to MPI_Waitall to wait for all the requests to be fulfilled.
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Broadcast(T &buf, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast a single element to all ranks on the communicator.
int Irecv(T &buf, int send_rank, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receive a buffer from a specified rank and create a handle for the MPI_Request.
int Allgatherv(T &buf, T &values_on_rank, std::vector< int > &size_on_rank, std::vector< int > &offsets_on_rank, MPI_Comm comm=MPI_COMM_WORLD)
Gathers local collections from all ranks onto all ranks of a communicator.
int Gatherv(T &buf, T &values_on_rank, std::vector< int > &size_on_rank, std::vector< int > &offsets_on_rank, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Gathers local collections from all ranks onto the root rank only.
int getNRanks(MPI_Comm comm=MPI_COMM_WORLD)
Get number of ranks.
int Isend(T &buf, int recv_rank, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Send a buffer to a specified rank and create a handle for the MPI_Request.
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Allreduce(T &local, T &global, MPI_Op operation, MPI_Comm comm=MPI_COMM_WORLD)
All reduce a single element across all ranks in a communicator.
int Scatterv(T &sendbuf, std::vector< int > &variables_per_rank, std::vector< int > &offsets, T &recvbuff, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
MPI_Scatterv on std:: collections. Sends only the designated portion of sendbuf to each rank.