OP  0.1
OP is an optimization solver plugin package
 All Classes Namespaces Functions Variables Typedefs Enumerations Friends
comm_validator.cpp
1 #include "op.hpp"
2 #include "op_debug.hpp"
3 
4 int main(int argc, char * argv[]) {
5 
6  MPI_Init(&argc, &argv);
7 
8  int my_rank = op::mpi::getRank();
9 
10  auto comm_pattern = op::debug::readCommPatternFromDisk<std::vector<std::size_t>>(my_rank);
11 
12  auto [global_size, variables_per_rank] =
13  op::utility::parallel::gatherVariablesPerRank<int>(comm_pattern.owned_variable_list.size());
14  std::vector<int> owned_variables_per_rank_ = variables_per_rank;
15  auto owned_offsets_ = op::utility::buildInclusiveOffsets(owned_variables_per_rank_);
16 
17  // get all the labels globally that are owned (they should be unique)
18  auto global_labels = op::utility::parallel::concatGlobalVector(global_size, owned_variables_per_rank_, owned_offsets_, comm_pattern.owned_variable_list);
19 
20  // build global "truth" values for verification
21  std::vector<double> global_values_truth(global_size);
22  for (int i = 0; i < global_size; i++) {
23  global_values_truth[i] = static_cast<double>(i);
24  }
25 
26  // on each rank update values by adding shift to global_values that each rank owns
27  const double shift = 1.;
28 
29  auto global_values_expect = global_values_truth;
30  for (auto i = owned_offsets_[static_cast<std::size_t>(my_rank)] ; i < owned_offsets_[static_cast<std::size_t>(my_rank) + 1]; i++) {
31  global_values_expect[i] += shift;
32  }
33 
34  // do global serial modification
35  auto global_values_mod = global_values_truth;
36  for (auto & v : global_values_mod) {
37  v += shift;
38  }
39 
40  std::vector<double> owned_data(comm_pattern.owned_variable_list.size());
41  std::vector<double> empty;
42 
43  if (my_rank != 0) {
44  op::mpi::Scatterv(empty, owned_variables_per_rank_, owned_offsets_, owned_data);
45  } else {
46  // root node
47  op::mpi::Scatterv(global_values_mod, owned_variables_per_rank_, owned_offsets_, owned_data);
48  }
49 
50  // check values on owned ranks first
51  for (int owned_var = 0; owned_var < owned_variables_per_rank_[my_rank]; owned_var++) {
52  if (std::abs(global_values_expect[owned_offsets_[my_rank] + owned_var] - owned_data[owned_var]) >= 1.e-8) {
53  std::cout << "owned_var mismatch on " << my_rank << " (" << owned_var << ") : " << global_values_expect[owned_offsets_[my_rank] + owned_var] << " " << owned_data[owned_var] << std::endl;
54  }
55  }
56 
57  // check if things are being relayed back to local variables appropriately
58  auto global_reduced_map_to_local = op::utility::inverseMap(comm_pattern.local_variable_list);
59 
60  std::vector<std::size_t> index_map;
61  for (auto id : comm_pattern.owned_variable_list) {
62  index_map.push_back(global_reduced_map_to_local[id][0]);
63  }
64  // temporary local_data
65  std::vector<double> local_data(comm_pattern.local_variable_list.size());
66  std::vector<double> local_variables(comm_pattern.local_variable_list.size());
67  op::utility::accessPermuteStore(owned_data, index_map, local_data);
68 
69  local_variables = op::ReturnLocalUpdatedVariables(comm_pattern.rank_communication,
70  global_reduced_map_to_local, local_data);
71 
72  // check values of variables
73  for (std::size_t i = 0; i < local_variables.size(); i++) {
74  auto it = std::find(global_labels.begin(), global_labels.end(), comm_pattern.local_variable_list[i]);
75  auto offset = it - global_labels.begin();
76  if (std::abs(global_values_mod[offset] - local_variables[i]) >= 1.e-8) {
77  std::cout << "reduced_update mismatch on " << my_rank << " (" << i << ") : " << global_values_mod[offset] << " " <<local_variables[i] << std::endl;
78  }
79  }
80 
81 
82 
83  MPI_Finalize();
84  return 0;
85 }
int getRank(MPI_Comm comm=MPI_COMM_WORLD)
Get rank.
Definition: op_mpi.hpp:50
auto inverseMap(T &vector_map)
Inverts a vector that provides a map into an unordered_map.
Definition: op_utility.hpp:417
V concatGlobalVector(typename V::size_type global_size, std::vector< int > &variables_per_rank, std::vector< int > &offsets, V &local_vector, bool gatherAll=true, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Assemble a vector by concatenation of local_vector across all ranks on a communicator.
Definition: op_utility.hpp:99
ValuesType ReturnLocalUpdatedVariables(utility::RankCommunication< T > &info, I &global_ids_to_local, ValuesType &reduced_values)
Generate update method to propagate owned local variables back to local variables in parallel...
Definition: op.hpp:313
void accessPermuteStore(T &vector, M &map, T &results)
Retrieves from T and stores in permuted mapping M, result[M[i]] = T[i].
Definition: op_utility.hpp:303
std::vector< T > buildInclusiveOffsets(std::vector< T > &values_per_rank)
Takes in sizes per index and performs a rank-local inclusive offset.
Definition: op_utility.hpp:43
int Scatterv(T &sendbuf, std::vector< int > &variables_per_rank, std::vector< int > &offsets, T &recvbuff, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
MPI_Scatterv on std::collections. Send only portions of buff to ranks.
Definition: op_mpi.hpp:175