OP  0.1
OP is an optimization solver plugin package
op_mpi.hpp
1 #pragma once
2 #include <mpi.h>
3 namespace op {
4 
6 namespace mpi {
8 namespace detail {
9 
10 // default template
11 template <typename T>
12 struct mpi_t {
13  static MPI_Datatype type() {return MPI_BYTE; }
14 };
15 
16 template <>
17 struct mpi_t<double> {
18  static MPI_Datatype type() {return MPI_DOUBLE;}
19 };
20 
21 template <>
22 struct mpi_t<int> {
23  static MPI_Datatype type() {return MPI_INT;}
24 };
25 
26 template <>
27 struct mpi_t<unsigned long> {
28  static MPI_Datatype type() {return MPI_UNSIGNED_LONG;}
29 };
30 
31 template <typename T, typename SFINAE = void>
32 struct has_data : std::false_type {
33 };
34 
35 template <typename T>
36 struct has_data<T, std::void_t<decltype(std::declval<T>().data())>> : std::true_type {
37 };
38 
39 template <typename T, typename SFINAE = void>
40 struct has_size : std::false_type {
41 };
42 
43 template <typename T>
44 struct has_size<T, std::void_t<decltype(std::declval<T>().size())>> : std::true_type {
45 };
46 
47 } // namespace detail
48 
50 int getRank(MPI_Comm comm = MPI_COMM_WORLD)
51 {
52  int rank;
53  MPI_Comm_rank(comm, &rank);
54  return rank;
55 }
56 
58 int getNRanks(MPI_Comm comm = MPI_COMM_WORLD)
59 {
60  int nranks;
61  MPI_Comm_size(comm, &nranks);
62  return nranks;
63 }
64 
74 template <typename T>
75 std::enable_if_t<!(detail::has_data<T>::value && detail::has_size<T>::value), int> Allreduce(
76  T& local, T& global, MPI_Op operation, MPI_Comm comm = MPI_COMM_WORLD)
77 {
78  return MPI_Allreduce(&local, &global, 1, mpi::detail::mpi_t<T>::type(), operation, comm);
79 }
80 
90 template <typename T>
91 std::enable_if_t<(detail::has_data<T>::value && detail::has_size<T>::value), int> Allreduce(
92  T& local, T& global, MPI_Op operation, MPI_Comm comm = MPI_COMM_WORLD)
93 {
94  return MPI_Allreduce(local.data(), global.data(), local.size(), mpi::detail::mpi_t<typename T::value_type>::type(),
95  operation, comm);
96 }
97 
105 template <typename T>
106 std::enable_if_t<!(detail::has_data<T>::value && detail::has_size<T>::value), int> Broadcast(
107  T& buf, int root = 0, MPI_Comm comm = MPI_COMM_WORLD)
108 {
109  return MPI_Bcast(&buf, 1, mpi::detail::mpi_t<T>::type(), root, comm);
110 }
111 
119 template <typename T>
120 std::enable_if_t<(detail::has_data<T>::value && detail::has_size<T>::value), int> Broadcast(
121  T& buf, int root = 0, MPI_Comm comm = MPI_COMM_WORLD)
122 {
123  return MPI_Bcast(buf.data(), static_cast<int>(buf.size()), mpi::detail::mpi_t<typename T::value_type>::type(), root,
124  comm);
125 }
126 
136 template <typename T>
137 int Allgatherv(T& buf, T& values_on_rank, std::vector<int>& size_on_rank, std::vector<int>& offsets_on_rank,
138  MPI_Comm comm = MPI_COMM_WORLD)
139 {
140  return MPI_Allgatherv(buf.data(), static_cast<int>(buf.size()), detail::mpi_t<typename T::value_type>::type(),
141  values_on_rank.data(), size_on_rank.data(), offsets_on_rank.data(),
142  detail::mpi_t<typename T::value_type>::type(), comm);
143 }
144 
156 template <typename T>
157 int Gatherv(T& buf, T& values_on_rank, std::vector<int>& size_on_rank, std::vector<int>& offsets_on_rank, int root = 0,
158  MPI_Comm comm = MPI_COMM_WORLD)
159 {
160  return MPI_Gatherv(buf.data(), static_cast<int>(buf.size()), detail::mpi_t<typename T::value_type>::type(),
161  values_on_rank.data(), size_on_rank.data(), offsets_on_rank.data(),
162  detail::mpi_t<typename T::value_type>::type(), root, comm);
163 }
164 
174 template <typename T>
175 int Scatterv(T& sendbuf, std::vector<int>& variables_per_rank, std::vector<int>& offsets, T& recvbuff, int root = 0,
176  MPI_Comm comm = MPI_COMM_WORLD)
177 {
178  // only check the size of the recv buff in debug mode
179  assert(static_cast<typename T::size_type>(
180  [&]() { return variables_per_rank[static_cast<std::size_t>(mpi::getRank(comm))]; }()) == recvbuff.size());
181 
182  return MPI_Scatterv(sendbuf.data(), variables_per_rank.data(), offsets.data(),
183  mpi::detail::mpi_t<typename T::value_type>::type(), recvbuff.data(),
184  static_cast<int>(recvbuff.size()), mpi::detail::mpi_t<typename T::value_type>::type(), root, comm);
185 }
186 
196 template <typename T>
197 int Irecv(T& buf, int send_rank, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD)
198 {
199  return MPI_Irecv(buf.data(), static_cast<int>(buf.size()), mpi::detail::mpi_t<typename T::value_type>::type(),
200  send_rank, tag, comm, request);
201 }
202 
213 template <typename T>
214 int Isend(T& buf, int recv_rank, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD)
215 {
216  return MPI_Isend(buf.data(), static_cast<int>(buf.size()), mpi::detail::mpi_t<typename T::value_type>::type(),
217  recv_rank, tag, comm, request);
218 }
219 
227 int Waitall(std::vector<MPI_Request>& requests, std::vector<MPI_Status>& status)
228 {
229  return MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());
230 }
231 
232 int CreateAndSetErrorHandler(MPI_Errhandler& newerr, void (*err)(MPI_Comm* comm, int* err, ...),
233  MPI_Comm comm = MPI_COMM_WORLD)
234 {
235  MPI_Comm_create_errhandler(err, &newerr);
236  return MPI_Comm_set_errhandler(comm, newerr);
237 }
238 
239 } // namespace mpi
240 
241 } // namespace op
int getRank(MPI_Comm comm=MPI_COMM_WORLD)
Get rank.
Definition: op_mpi.hpp:50
int Waitall(std::vector< MPI_Request > &requests, std::vector< MPI_Status > &status)
A wrapper around MPI_Waitall that waits for all outstanding requests to complete.
Definition: op_mpi.hpp:227
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Broadcast(T &buf, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast a single element to all ranks on the communicator.
Definition: op_mpi.hpp:106
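A minimal usage sketch for the two Broadcast overloads, not part of the header itself; it assumes op_mpi.hpp is reachable on the include path and that the variable names (step_size, design) are illustrative only:

#include <mpi.h>
#include <vector>
#include "op_mpi.hpp"  // assumed include path

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  // Scalar overload: double has no data()/size(), so a single MPI_DOUBLE is broadcast.
  double step_size = (op::mpi::getRank() == 0) ? 0.25 : 0.0;
  op::mpi::Broadcast(step_size);  // root defaults to 0, comm defaults to MPI_COMM_WORLD

  // Container overload: every rank must pre-size the buffer identically before the call.
  std::vector<double> design(10, 0.0);
  if (op::mpi::getRank() == 0) {
    for (auto& v : design) v = 1.0;
  }
  op::mpi::Broadcast(design);

  MPI_Finalize();
  return 0;
}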
int Irecv(T &buf, int send_rank, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receive a buffer from a specified rank and create a handle for the MPI_Request.
Definition: op_mpi.hpp:197
int Allgatherv(T &buf, T &values_on_rank, std::vector< int > &size_on_rank, std::vector< int > &offsets_on_rank, MPI_Comm comm=MPI_COMM_WORLD)
Gathers local collections from all ranks onto all ranks of a communicator.
Definition: op_mpi.hpp:137
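A sketch of Allgatherv with variable-length contributions. The per-rank counts and offsets are built by the caller here with a plain MPI_Allgather; that step is an illustration choice, not something this header provides:

#include <mpi.h>
#include <numeric>
#include <vector>
#include "op_mpi.hpp"  // assumed include path

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank   = op::mpi::getRank();
  int nranks = op::mpi::getNRanks();

  // Each rank contributes a different number of values (rank + 1 of them).
  std::vector<double> local(static_cast<std::size_t>(rank) + 1, static_cast<double>(rank));

  // Caller-supplied counts and displacements, exchanged with plain MPI for illustration.
  std::vector<int> size_on_rank(nranks);
  int local_size = static_cast<int>(local.size());
  MPI_Allgather(&local_size, 1, MPI_INT, size_on_rank.data(), 1, MPI_INT, MPI_COMM_WORLD);

  std::vector<int> offsets_on_rank(nranks, 0);
  for (int i = 1; i < nranks; i++) offsets_on_rank[i] = offsets_on_rank[i - 1] + size_on_rank[i - 1];

  int total = std::accumulate(size_on_rank.begin(), size_on_rank.end(), 0);
  std::vector<double> values_on_rank(static_cast<std::size_t>(total));

  op::mpi::Allgatherv(local, values_on_rank, size_on_rank, offsets_on_rank);

  MPI_Finalize();
  return 0;
}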
int Gatherv(T &buf, T &values_on_rank, std::vector< int > &size_on_rank, std::vector< int > &offsets_on_rank, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Gathers local collections from all ranks onto the root rank only.
Definition: op_mpi.hpp:157
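A similar sketch for Gatherv; only the root rank needs a sized receive buffer, while the counts and offsets are built on every rank here for simplicity (the fixed two values per rank are an illustrative assumption):

#include <mpi.h>
#include <vector>
#include "op_mpi.hpp"  // assumed include path

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank   = op::mpi::getRank();
  int nranks = op::mpi::getNRanks();

  // Every rank sends two values; the counts/offsets are only significant on root.
  std::vector<int> local = {rank, rank * 10};
  std::vector<int> size_on_rank(nranks, 2);
  std::vector<int> offsets_on_rank(nranks, 0);
  for (int i = 1; i < nranks; i++) offsets_on_rank[i] = offsets_on_rank[i - 1] + size_on_rank[i - 1];

  std::vector<int> values_on_rank;
  if (rank == 0) values_on_rank.resize(static_cast<std::size_t>(2 * nranks));

  op::mpi::Gatherv(local, values_on_rank, size_on_rank, offsets_on_rank);  // root defaults to 0

  MPI_Finalize();
  return 0;
}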
int getNRanks(MPI_Comm comm=MPI_COMM_WORLD)
Get number of ranks.
Definition: op_mpi.hpp:58
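A minimal sketch of the two query helpers, assuming op_mpi.hpp is on the include path and the program is launched under an MPI launcher:

#include <mpi.h>
#include <cstdio>
#include "op_mpi.hpp"  // assumed include path

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  // Both helpers default to MPI_COMM_WORLD; any communicator can be passed explicitly.
  std::printf("rank %d of %d\n", op::mpi::getRank(), op::mpi::getNRanks());
  MPI_Finalize();
  return 0;
}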
int Isend(T &buf, int recv_rank, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Send a buffer to a specified rank and create a handle for the MPI_Request.
Definition: op_mpi.hpp:214
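A sketch pairing Isend, Irecv, and Waitall for a two-rank exchange; the buffer length and the choice of ranks 0 and 1 are illustrative assumptions:

#include <mpi.h>
#include <vector>
#include "op_mpi.hpp"  // assumed include path

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank   = op::mpi::getRank();
  int nranks = op::mpi::getNRanks();

  std::vector<double> send_buf(4, static_cast<double>(rank));
  std::vector<double> recv_buf(4, 0.0);

  if (nranks >= 2 && rank < 2) {
    int other = 1 - rank;  // ranks 0 and 1 exchange buffers

    std::vector<MPI_Request> requests(2);
    op::mpi::Irecv(recv_buf, other, &requests[0]);  // tag defaults to 0
    op::mpi::Isend(send_buf, other, &requests[1]);

    std::vector<MPI_Status> statuses(requests.size());
    op::mpi::Waitall(requests, statuses);
  }

  MPI_Finalize();
  return 0;
}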
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Allreduce(T &local, T &global, MPI_Op operation, MPI_Comm comm=MPI_COMM_WORLD)
All-reduce a single element across all ranks in a communicator.
Definition: op_mpi.hpp:75
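A sketch exercising both Allreduce overloads with MPI_SUM; the objective/gradient variable names are illustrative only:

#include <mpi.h>
#include <vector>
#include "op_mpi.hpp"  // assumed include path

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  // Scalar overload: one value per rank reduced into a single global value.
  double local_obj  = static_cast<double>(op::mpi::getRank());
  double global_obj = 0.0;
  op::mpi::Allreduce(local_obj, global_obj, MPI_SUM);

  // Container overload: element-wise reduction; both vectors must have the same size on every rank.
  std::vector<double> local_grad(3, 1.0);
  std::vector<double> global_grad(3, 0.0);
  op::mpi::Allreduce(local_grad, global_grad, MPI_SUM);

  MPI_Finalize();
  return 0;
}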
int Scatterv(T &sendbuf, std::vector< int > &variables_per_rank, std::vector< int > &offsets, T &recvbuff, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
MPI_Scatterv on standard library containers; sends only a portion of sendbuf to each rank.
Definition: op_mpi.hpp:175
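A sketch of Scatterv in which rank i receives i + 1 values; the receive buffer is pre-sized to match variables_per_rank, which is exactly what the debug-mode assert inside Scatterv checks. The data values are illustrative:

#include <mpi.h>
#include <numeric>
#include <vector>
#include "op_mpi.hpp"  // assumed include path

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank   = op::mpi::getRank();
  int nranks = op::mpi::getNRanks();

  // Rank i receives i + 1 values; counts and offsets are built on every rank.
  std::vector<int> variables_per_rank(nranks);
  for (int i = 0; i < nranks; i++) variables_per_rank[i] = i + 1;

  std::vector<int> offsets(nranks, 0);
  for (int i = 1; i < nranks; i++) offsets[i] = offsets[i - 1] + variables_per_rank[i - 1];

  // Only root needs a populated send buffer; other ranks may leave it empty.
  std::vector<double> sendbuf;
  if (rank == 0) {
    int total = std::accumulate(variables_per_rank.begin(), variables_per_rank.end(), 0);
    sendbuf.assign(static_cast<std::size_t>(total), 1.0);
  }

  // The receive buffer must already match variables_per_rank[rank] in size.
  std::vector<double> recvbuf(static_cast<std::size_t>(rank) + 1);

  op::mpi::Scatterv(sendbuf, variables_per_rank, offsets, recvbuf);  // root defaults to 0

  MPI_Finalize();
  return 0;
}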