HavoqGT
mailbox.hpp
/*
 * Copyright (c) 2013, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * Written by Roger Pearce <rpearce@llnl.gov>.
 * LLNL-CODE-644630.
 * All rights reserved.
 *
 * This file is part of HavoqGT, Version 0.1.
 * For details, see https://computation.llnl.gov/casc/dcca-pub/dcca/Downloads.html
 *
 * Please also read this link – Our Notice and GNU Lesser General Public License.
 * http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS FOR A
 * PARTICULAR PURPOSE. See the terms and conditions of the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * OUR NOTICE AND TERMS AND CONDITIONS OF THE GNU GENERAL PUBLIC LICENSE
 *
 * Our Preamble Notice
 *
 * A. This notice is required to be provided under our contract with the
 * U.S. Department of Energy (DOE). This work was produced at the Lawrence
 * Livermore National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
 *
 * B. Neither the United States Government nor Lawrence Livermore National
 * Security, LLC nor any of their employees, makes any warranty, express or
 * implied, or assumes any liability or responsibility for the accuracy,
 * completeness, or usefulness of any information, apparatus, product, or process
 * disclosed, or represents that its use would not infringe privately-owned rights.
 *
 * C. Also, reference herein to any specific commercial products, process, or
 * services by trade name, trademark, manufacturer or otherwise does not
 * necessarily constitute or imply its endorsement, recommendation, or favoring by
 * the United States Government or Lawrence Livermore National Security, LLC. The
 * views and opinions of authors expressed herein do not necessarily state or
 * reflect those of the United States Government or Lawrence Livermore National
 * Security, LLC, and shall not be used for advertising or product endorsement
 * purposes.
 *
 */

#ifndef HAVOQGT_MPI_MAILBOX_ROUTED_HPP_INCLUDED
#define HAVOQGT_MPI_MAILBOX_ROUTED_HPP_INCLUDED

#include <havoqgt/mpi.hpp>
#include <havoqgt/environment.hpp>
#include <vector>
#include <list>
#include <limits>
#include <deque>
#include <boost/unordered_map.hpp>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <stdint.h>
#include <boost/tuple/tuple.hpp>
#include <algorithm>   // added: std::min
#include <functional>  // added: std::plus
#include <iostream>    // added: statistics output
#include <cassert>     // added: assert

namespace havoqgt { namespace mpi {

template <typename TMsg>
class mailbox_routed {

  // class routed_msg_type {
  // public:
  //   routed_msg_type(uint32_t _dest, const TMsg& _msg)
  //     : msg(_msg) { }
  //   TMsg msg;
  //   //uint32_t m_dest;
  //   uint32_t dest() const { return msg.vertex.owner();/*m_dest;*/ }
  //   bool is_tree_op() const { return msg.vertex.is_bcast() ||
  //                                    msg.vertex.is_parent_op(); }
  // };

  typedef TMsg routed_msg_type;

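  // Routing layout: twod_router appears to treat the job as a grid of nodes
  // by cores, with up to 24 consecutive ranks per node.  proxy_rank(dest)
  // returns the rank on *this* node whose node offset matches dest's offset,
  // so a routed message takes at most two hops: on-node to the proxy, then
  // across nodes to the destination.  bcast_proxies() holds one rank per
  // node sharing this rank's offset, and bcast_targets() holds every rank
  // on this node; together they drive the two-level broadcast in bcast().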
  class twod_router {
   public:
    //const static int procs_per_node = 16;
    twod_router() { }
    twod_router(uint32_t rank, uint32_t size) {
      procs_per_node = std::min(size, uint32_t(24));
      uint64_t rank_to_print = 2;
      //if(rank == rank_to_print)
      //  std::cout << "Rank " << rank_to_print << "'s bcast_targets: ";
      my_node_base_rank = rank - (rank % procs_per_node);
      for(uint32_t i=0; i<procs_per_node; ++i) {
        uint64_t target = my_node_base_rank + i;
        m_bcast_targets.push_back(target);
        //if(rank == rank_to_print)
        //  std::cout << target << " ";
      }
      //if(rank == rank_to_print)
      //  std::cout << std::endl << "Rank " << rank_to_print << "'s bcast_proxies: ";
      uint64_t node_offset = rank % procs_per_node;
      for(uint32_t i=0; i<size / procs_per_node; ++i) {
        uint64_t proxy = (i * procs_per_node) + node_offset;
        m_bcast_proxies.push_back(proxy);
        //if(rank == rank_to_print)
        //  std::cout << proxy << " ";
      }
      //if(rank == rank_to_print)
      //  std::cout << std::endl;
    }
    uint32_t proxy_rank(uint32_t dest) {
      uint64_t dest_offset = dest % procs_per_node;
      return my_node_base_rank + dest_offset;
    }
    const std::vector<uint32_t>& bcast_targets() const { return m_bcast_targets; }
    const std::vector<uint32_t>& bcast_proxies() const { return m_bcast_proxies; }
   private:
    std::vector<uint32_t> m_bcast_proxies;
    std::vector<uint32_t> m_bcast_targets;
    uint64_t procs_per_node;      // declarations reconstructed; missing from listing
    uint64_t my_node_base_rank;
  };

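  // msg_buffer is a non-owning view of one aggregation buffer: it tracks
  // only a raw pointer and a message count.  The memory itself comes from
  // allocate_msg_buffer() and is recycled via m_vec_free_buffers, so
  // clear() simply forgets the pointer rather than freeing it.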
  class msg_buffer {
   public:
    msg_buffer() : m_size(0), m_ptr(NULL) { }
    msg_buffer(void* _ptr) : m_size(0), m_ptr(_ptr) { }

    size_t push_back(const routed_msg_type& _msg) {
      static_cast<routed_msg_type*>(m_ptr)[m_size] = _msg;
      return ++m_size;
    }

    routed_msg_type& operator[](size_t i) { return static_cast<routed_msg_type*>(m_ptr)[i]; }

    size_t size_in_bytes() { return m_size * sizeof(routed_msg_type); }

    size_t size() const { return m_size; }
    bool empty() const { return m_size == 0; }
    void clear() { m_size = 0; m_ptr = NULL; }
    bool is_init() const { return m_ptr != NULL; }
    void* get_ptr() const { return m_ptr; }

   private:
    size_t m_size;
    void* m_ptr;
  };

 public:
  typedef TMsg message_type;

  mailbox_routed( MPI_Comm _mpi_comm,
                  int _mpi_tag):
      m_mpi_comm(_mpi_comm),
      m_mpi_tag(_mpi_tag) {
    m_pending_partial_buffers = 0;
    m_num_pending_isend = 0;
    m_last_recv_count = 0;

    //statistics
    m_mpi_send_counter = 0;
    m_tree_send_counter = 0;
    m_route_counter = 0;
    m_send_counter = 0;
    m_recv_counter = 0;

    m_receiving = false;

    CHK_MPI(MPI_Comm_rank(m_mpi_comm, &m_mpi_rank) );
    CHK_MPI(MPI_Comm_size(m_mpi_comm, &m_mpi_size) );

    //Allocate buffer slots for each rank.
    //This does not actually allocate the buffers' memory.
    m_buffer_per_rank.resize(m_mpi_size);
    m_list_isend_request_per_rank.resize(m_mpi_size);
    m_pending_iterator_per_rank.resize(m_mpi_size, m_list_pending.end());

    for(size_t i=0; i<get_environment().mailbox_num_irecv(); ++i) {
      void* irecv_buff = NULL;
      int ret = posix_memalign(&irecv_buff, 32,
                               get_environment().mailbox_aggregation() * sizeof(routed_msg_type));
      if(ret != 0) {
        perror("posix_memalign-irecv"); exit(-1);
      }
      post_new_irecv(irecv_buff);
    }
    m_2d_comm = twod_router(m_mpi_rank, m_mpi_size);

    m_tree_parent = (m_mpi_rank - 1) / 2;
    m_tree_child1 = (m_mpi_rank * 2) + 1;
    m_tree_child2 = (m_mpi_rank * 2) + 2;
  }

  ~mailbox_routed() {
    assert(m_pending_partial_buffers == 0);
    while(m_num_pending_isend > 0) {
      cleanup_pending_isend_requests();
    }
    while(!m_list_irecv_request.empty()) {
      CHK_MPI( MPI_Cancel( &(m_list_irecv_request.front().first) ) );
      free(m_list_irecv_request.front().second);
      m_list_irecv_request.pop_front();
    }
    for(size_t i=0; i<m_vec_free_buffers.size(); ++i) {
      free(m_vec_free_buffers[i]);
    }

    if(get_environment().mailbox_print_stats()) {
      uint64_t g_mpi_send_counter  = mpi_all_reduce(m_mpi_send_counter,  std::plus<uint64_t>(), MPI_COMM_WORLD);
      uint64_t g_tree_send_counter = mpi_all_reduce(m_tree_send_counter, std::plus<uint64_t>(), MPI_COMM_WORLD);
      uint64_t g_route_counter     = mpi_all_reduce(m_route_counter,     std::plus<uint64_t>(), MPI_COMM_WORLD);
      uint64_t g_send_counter      = mpi_all_reduce(m_send_counter,      std::plus<uint64_t>(), MPI_COMM_WORLD);
      uint64_t g_recv_counter      = mpi_all_reduce(m_recv_counter,      std::plus<uint64_t>(), MPI_COMM_WORLD);
      if(m_mpi_rank == 0) {
        std::cout << "****************** Mailbox Statistics ********************" << std::endl;
        std::cout << "routed message size = " << sizeof(TMsg) << std::endl;
        std::cout << "mpi_send_counter = " << g_mpi_send_counter << std::endl;
        std::cout << "tree_send_counter = " << g_tree_send_counter << std::endl;
        std::cout << "route_counter = " << g_route_counter << std::endl;
        std::cout << "send_counter = " << g_send_counter << std::endl;
        std::cout << "recv_counter = " << g_recv_counter << std::endl;
        std::cout << "Average send size = " << double(g_send_counter + g_route_counter) / double(g_mpi_send_counter) << std::endl;
        std::cout << "***********************************************************" << std::endl;
      }
    }

    CHK_MPI( MPI_Barrier( m_mpi_comm ) );
  }

  void send_tree_fast(int raw_dest, const TMsg& _raw_msg) {
    ++m_tree_send_counter;
    routed_msg_type msg(raw_dest, _raw_msg);
    if(!m_buffer_per_rank[raw_dest].is_init()) {
      m_buffer_per_rank[raw_dest] = allocate_msg_buffer();
      ++m_pending_partial_buffers;
      m_list_pending.push_back(raw_dest);
      m_pending_iterator_per_rank[raw_dest] = --m_list_pending.end();
    }
    size_t size = m_buffer_per_rank[raw_dest].push_back(msg);

    if(size >= get_environment().mailbox_tree_aggregation() ) {
      post_isend(raw_dest);
      cleanup_pending_isend_requests();
    }
  }

  void send_tree_parent(const TMsg& _raw_msg) {
    send_tree_fast(m_tree_parent, _raw_msg);
  }

  void send_tree_children(const TMsg& _raw_msg) {
    if(m_tree_child1 < m_mpi_size) {
      send_tree_fast(m_tree_child1, _raw_msg);
      if(m_tree_child2 < m_mpi_size) {
        send_tree_fast(m_tree_child2, _raw_msg);
      }
    }
  }

  void route_fast_path(uint32_t dest, const routed_msg_type& _msg) {
    ++m_route_counter;
    if(!m_buffer_per_rank[dest].is_init()) {
      m_buffer_per_rank[dest] = allocate_msg_buffer();
      ++m_pending_partial_buffers;
      m_list_pending.push_back(dest);
      m_pending_iterator_per_rank[dest] = --m_list_pending.end();
    }
    size_t size = m_buffer_per_rank[dest].push_back(_msg);

    if(size == get_environment().mailbox_aggregation() ) {
      post_isend(dest);
      cleanup_pending_isend_requests();
    }
  }

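  // bcast() fans a broadcast out in two levels that mirror the 2D layout:
  // the message is first routed to one proxy rank per node
  // (bcast_proxies()); each proxy, when it receives the copy, calls
  // bcast_to_targets() to re-address it to every rank on its own node.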
  template <typename OutputIterator>
  void bcast(TMsg _raw_msg, OutputIterator _oitr) {
    assert(_raw_msg.get_bcast());
    _raw_msg.set_dest(m_mpi_size+1);
    for(uint32_t i=0; i<m_2d_comm.bcast_proxies().size(); ++i) {
      uint32_t proxy_rank = m_2d_comm.bcast_proxies()[i];
      if(proxy_rank == uint32_t(m_mpi_rank)) {
        bcast_to_targets(_raw_msg);
      } else {
        route_fast_path(proxy_rank, _raw_msg);
      }
    }
    if(m_receiving) return; //prevent receive recursion @todo, make this a function called wait
    do {
      cleanup_pending_isend_requests();
      receive(_oitr);
    } while(m_num_pending_isend > get_environment().mailbox_num_isend());
  }

  void bcast_to_targets(TMsg _msg) {
    assert(_msg.get_bcast());
    for(uint32_t i=0; i<m_2d_comm.bcast_targets().size(); ++i) {
      uint32_t target = m_2d_comm.bcast_targets()[i];
      _msg.set_dest(target);
      route_fast_path(target, _msg);
    }
  }


  // NOTE: the name of this helper is an assumption; its definition line is
  // missing from the listing and only the commented-out body, which would
  // flush the oldest pending buffer if it has been starved, survives.
  void check_for_starvation() {
    /*if(!m_list_pending.empty()) {
      size_t to_check = m_list_pending.front();
      size_t size = m_send_ticket_per_rank.size();
      //size_t to_check = ++m_fair_checker % size;
      if(m_send_counter > m_send_ticket_per_rank[to_check] + size) {
        post_isend(to_check);
      }
    }*/
  }

  template <typename OutputIterator>
  void send(int raw_dest, const TMsg& _raw_msg, OutputIterator _oitr, bool fast=true) {
    ++m_send_counter;
    assert(raw_dest >= 0 && raw_dest < m_mpi_size);
    //++m_last_recv_count;
    assert(raw_dest != m_mpi_rank); // just don't send to self!
    //routed_msg_type msg(raw_dest, _raw_msg);
    routed_msg_type msg = _raw_msg; //@todo fixme
    int dest = m_2d_comm.proxy_rank(raw_dest);
    if(dest == m_mpi_rank) dest = raw_dest;
    if(!m_buffer_per_rank[dest].is_init()) {
      m_buffer_per_rank[dest] = allocate_msg_buffer();
      ++m_pending_partial_buffers;
      m_list_pending.push_back(dest);
      m_pending_iterator_per_rank[dest] = --m_list_pending.end();
    }
    size_t size = m_buffer_per_rank[dest].push_back(msg);

    if(size == get_environment().mailbox_aggregation() ) {
      post_isend(dest);
      cleanup_pending_isend_requests();
      if(m_receiving) return; //prevent receive recursion
      do {
        //cleanup_pending_isend_requests_index(dest);
        cleanup_pending_isend_requests();
        receive(_oitr);
        //} while(m_list_isend_request_per_rank[dest].size() > 1);
      } while(m_num_pending_isend > get_environment().mailbox_num_isend());
    }
    //if(m_last_recv_count > 128) {
    //  receive(_oitr);
    //}
  }

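  // receive() tests the irecv at the head of the queue and, if it has
  // completed, walks the buffer message by message: deliver locally through
  // the output iterator, fan out again if it is a broadcast, offer it to
  // _oitr.intercept() if it is an intercept message, or forward it another
  // hop with route_fast_path().  The buffer is then re-posted as a new irecv.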
  template <typename OutputIterator> //was vector but also want list
  void receive(OutputIterator _oitr, bool aggressive=false) {
    m_receiving = true;
    m_last_recv_count = 0;
    int flag(0);
    //do {
    if(!m_list_irecv_request.empty()) {
      std::pair<MPI_Request, void* > pair_req = m_list_irecv_request.front();
      m_list_irecv_request.pop_front();
      MPI_Request* request_ptr = &(pair_req.first);
      MPI_Status status;
      CHK_MPI( MPI_Test( request_ptr, &flag, &status) );
      if(flag) {
        routed_msg_type* recv_ptr = static_cast<routed_msg_type*> (
            pair_req.second );
        int count(0);
        CHK_MPI( MPI_Get_count(&status, MPI_BYTE, &count) );
        for(size_t i=0; i<count/sizeof(routed_msg_type); ++i) {
          if(recv_ptr[i].dest() == uint32_t(m_mpi_rank) /*|| recv_ptr[i].is_tree_op()*/) {
            *_oitr = recv_ptr[i];//.msg;
            ++_oitr;
            ++m_recv_counter;
          } else if(recv_ptr[i].get_bcast()) {
            bcast_to_targets(recv_ptr[i]);
          } else if(recv_ptr[i].is_intercept()) {
            if( _oitr.intercept(recv_ptr[i]) ) {
              route_fast_path(recv_ptr[i].dest(), recv_ptr[i]);
            }
          } else {
            route_fast_path(recv_ptr[i].dest(), recv_ptr[i]);
          }
        }
        post_new_irecv(recv_ptr);
      } else {
        m_list_irecv_request.push_front(pair_req);
      }
    }
    //} while(flag);
    m_receiving = false;
  }


  // The is_idle() body and the name flush_buffers_if_idle() are
  // reconstructions; their original lines are missing from this listing.
  bool is_idle() {
    cleanup_pending_isend_requests();
    return m_list_pending.empty() && (m_num_pending_isend == 0);
  }

  void flush_buffers_if_idle() {
    if(!m_list_pending.empty()) {
      size_t index = m_list_pending.front();
      if(m_num_pending_isend < 1 && !m_buffer_per_rank[index].empty()) {
        post_isend(index);
      }
    }
  }

  int comm_rank() const { return m_mpi_rank; }
  int comm_size() const { return m_mpi_size; }

 private:
  msg_buffer allocate_msg_buffer() {
    if(m_vec_free_buffers.empty()) {
      void* buff = NULL;
      int ret = posix_memalign(&buff, 32,
                               get_environment().mailbox_aggregation() * sizeof(routed_msg_type));
      if(ret != 0) {
        perror("posix_memalign"); exit(-1);
      }
      m_vec_free_buffers.push_back(buff);
    }
    msg_buffer to_return(m_vec_free_buffers.back());
    m_vec_free_buffers.pop_back();
    return to_return;
  }

  void free_msg_buffer(void* _ptr) {
    m_vec_free_buffers.push_back(_ptr);
  }

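  // post_isend() flushes one rank's aggregation buffer as a single
  // MPI_Isend, removes that rank from the pending list, and records the
  // request so the buffer can be recycled once MPI_Test reports completion.
  // If the flushed rank was not the oldest pending entry, the front of the
  // pending list is flushed as well, so the oldest partially filled buffer
  // is not starved.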
  void post_isend(int index) {
    if(m_buffer_per_rank[index].empty()) return;
    ++m_mpi_send_counter;
    int dest = index;
    bool was_first_pending = false;
    if(m_pending_iterator_per_rank[dest] != m_list_pending.end()) {
      if(m_pending_iterator_per_rank[dest] == m_list_pending.begin()) {
        was_first_pending = true;
      }
      m_list_pending.erase(m_pending_iterator_per_rank[dest]);
      m_pending_iterator_per_rank[dest] = m_list_pending.end();
    }

    m_list_isends.push_back(index);
    boost::tuple<MPI_Request, void*, std::list<size_t>::iterator> isend_req_tuple;
    MPI_Request* request_ptr = &(isend_req_tuple.get<0>());
    isend_req_tuple.get<1>() = m_buffer_per_rank[index].get_ptr();
    isend_req_tuple.get<2>() = --m_list_isends.end();
    void* buffer_ptr = m_buffer_per_rank[index].get_ptr();
    int size_in_bytes = m_buffer_per_rank[index].size_in_bytes();

    CHK_MPI( MPI_Isend( buffer_ptr, size_in_bytes, MPI_BYTE, dest,
                        m_mpi_tag, m_mpi_comm, request_ptr) );

    --m_pending_partial_buffers;
    m_buffer_per_rank[index].clear();
    //int flag(0);
    //CHK_MPI( MPI_Test( request_ptr, &flag, MPI_STATUS_IGNORE) );
    //if(!flag) {
    m_list_isend_request_per_rank[index].push_back(isend_req_tuple);
    ++m_num_pending_isend;
    //}

    if(!was_first_pending/* && m_tree_parent != index && m_tree_child1 != index && m_tree_child2 != index*/) {
      post_isend(m_list_pending.front());
    }
  }

  bool cleanup_pending_isend_requests_index(size_t index) {
    bool to_return = false;
    if(m_list_isend_request_per_rank[index].empty()) return true;
    while(!m_list_isend_request_per_rank[index].empty()) {
      int flag(0);
      MPI_Request* request_ptr = &(m_list_isend_request_per_rank[index].front().get<0>());
      CHK_MPI( MPI_Test( request_ptr, &flag, MPI_STATUS_IGNORE) );
      if(flag) {
        free_msg_buffer(m_list_isend_request_per_rank[index].front().get<1>());
        m_list_isends.erase(m_list_isend_request_per_rank[index].front().get<2>());
        m_list_isend_request_per_rank[index].pop_front();
        --m_num_pending_isend;
        to_return = true;
      } else {
        break;
      }
    }
    return to_return;
  }

  void cleanup_pending_isend_requests(bool force_aggressive = false) {
    while(!m_list_isends.empty()) {
      bool found = cleanup_pending_isend_requests_index(m_list_isends.front());
      if(!found) break;
    }
  }

  void post_new_irecv(void* _buff) {
    std::pair<MPI_Request, void*> irecv_req;
    irecv_req.second = _buff;
    MPI_Request* request_ptr = &(irecv_req.first);
    int num_bytes = get_environment().mailbox_aggregation() * sizeof(routed_msg_type);
    CHK_MPI( MPI_Irecv( _buff, num_bytes, MPI_BYTE, MPI_ANY_SOURCE,
                        m_mpi_tag, m_mpi_comm, request_ptr) );
    m_list_irecv_request.push_back(irecv_req);
  }

  /// MPI configuration
  MPI_Comm m_mpi_comm;
  int m_mpi_tag;
  int m_mpi_rank;
  int m_mpi_size;

  // Binary-tree neighbors used by the send_tree_* operations
  // (declarations reconstructed; their lines are missing from this listing).
  uint32_t m_tree_parent;
  uint32_t m_tree_child1;
  uint32_t m_tree_child2;

  std::vector<void*> m_vec_free_buffers;
  std::vector<msg_buffer> m_buffer_per_rank;
  //boost::unordered_map<uint32_t, msg_buffer> m_buffer_per_rank;

  std::vector< std::list< boost::tuple<MPI_Request, void*, std::list<size_t>::iterator> > > m_list_isend_request_per_rank;
  //boost::unordered_map<uint64_t, std::list< boost::tuple<MPI_Request, void*, std::list<size_t>::iterator> > > m_list_isend_request_per_rank;
  std::list<size_t> m_list_isends;
  std::list< std::pair<MPI_Request, void*> > m_list_irecv_request;

  twod_router m_2d_comm;

  std::vector< std::list<size_t>::iterator > m_pending_iterator_per_rank;
  //boost::unordered_map<uint64_t, std::list<size_t>::iterator > m_pending_iterator_per_rank;
  std::list<size_t> m_list_pending;

  // Book-keeping (declarations reconstructed, as above).
  size_t m_pending_partial_buffers;
  size_t m_num_pending_isend;
  size_t m_last_recv_count;
  bool m_receiving;

  //Statistics
  uint64_t m_mpi_send_counter;
  uint64_t m_tree_send_counter;
  uint64_t m_route_counter;
  uint64_t m_send_counter;
  uint64_t m_recv_counter;

};


}} //namespace havoqgt { namespace mpi {


#endif //HAVOQGT_MPI_MAILBOX_ROUTED_HPP_INCLUDED
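A minimal usage sketch (not part of mailbox.hpp): mailbox_routed is a class template over the message type, so TMsg must supply the small interface the routines above call, namely dest(), set_dest(), get_bcast() and is_intercept(), and it should be trivially copyable because messages are written into raw aggregation buffers. The output iterator passed to send()/receive() needs operator*, operator++, assignment from the message type, and an intercept() member. The names my_msg, msg_collector and ping below are illustrative only; in HavoqGT these roles are played by the visitor queue's message and iterator types.

#include <vector>
#include <havoqgt/mailbox.hpp>

struct my_msg {                         // hypothetical message type
  uint32_t m_dest;
  bool     m_bcast;
  bool     m_intercept;

  uint32_t dest() const          { return m_dest; }
  void     set_dest(uint32_t d)  { m_dest = d; }
  bool     get_bcast() const     { return m_bcast; }
  bool     is_intercept() const  { return m_intercept; }
};

struct msg_collector {                  // hypothetical output-iterator shim
  std::vector<my_msg>* out;
  msg_collector& operator*()  { return *this; }
  msg_collector& operator++() { return *this; }
  msg_collector& operator=(const my_msg& m) { out->push_back(m); return *this; }
  bool intercept(const my_msg&) { return true; }   // true: keep routing the message
};

// Assumes MPI and the HavoqGT environment are already initialized.
void ping(havoqgt::mpi::mailbox_routed<my_msg>& mailbox, int partner) {
  std::vector<my_msg> inbox;
  msg_collector oitr;
  oitr.out = &inbox;

  if(partner != mailbox.comm_rank()) {
    my_msg m = { uint32_t(partner), false, false };
    mailbox.send(partner, m, oitr);     // buffered until the aggregation threshold
  }
  mailbox.flush_buffers_if_idle();      // push out the partially filled buffer
  mailbox.receive(oitr);                // poll once; a real driver loops until idle
}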