#include "op_debug.hpp"

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);

  // ... (omitted: my_rank, this process's rank on MPI_COMM_WORLD, is obtained here; see getRank() below)

  // Read the communication pattern that a previous run wrote to disk for this rank
  auto comm_pattern = op::debug::readCommPatternFromDisk<std::vector<std::size_t>>(my_rank);

  // Gather the number of owned variables on each rank and the total (global) count
  auto [global_size, variables_per_rank] =
      op::utility::parallel::gatherVariablesPerRank<int>(comm_pattern.owned_variable_list.size());
  std::vector<int> owned_variables_per_rank_ = variables_per_rank;

  // ... (omitted: owned_offsets_, global_labels, and global_reduced_map_to_local are set up in
  //      lines not shown here; see buildInclusiveOffsets, concatGlobalVector, and inverseMap below)

  // Reference global vector: entry i simply holds the value i
  std::vector<double> global_values_truth(global_size);
  for (int i = 0; i < global_size; i++) {
    global_values_truth[i] = static_cast<double>(i);
  }

  const double shift = 1.;

  // Expected values: only this rank's owned slice of the global vector is shifted
  auto global_values_expect = global_values_truth;
  for (auto i = owned_offsets_[static_cast<std::size_t>(my_rank)];
       i < owned_offsets_[static_cast<std::size_t>(my_rank) + 1]; i++) {
    global_values_expect[i] += shift;
  }

  // Modified global vector: every entry is shifted, so each rank's owned slice matches its expected slice
  auto global_values_mod = global_values_truth;
  for (auto& v : global_values_mod) {
    v += shift;
  }

  // Scatter each rank's owned slice of global_values_mod into owned_data
  std::vector<double> owned_data(comm_pattern.owned_variable_list.size());
  std::vector<double> empty;

  op::mpi::Scatterv(global_values_mod, owned_variables_per_rank_, owned_offsets_, owned_data);
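
  // How the pieces above fit together: owned_offsets_ is indexed with both my_rank and
  // my_rank + 1, so it must provide a begin and an end for every rank's slice; with per-rank
  // counts {3, 2, 4}, for example, the offsets would be {0, 3, 5, 9} (illustrative values,
  // not taken from this test). The check below then verifies that Scatterv handed this rank
  // the slice [owned_offsets_[my_rank], owned_offsets_[my_rank + 1]) of the send buffer,
  // i.e. that owned_data[k] equals global_values_expect[owned_offsets_[my_rank] + k].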
  // Check the scattered owned values against the expected values
  for (int owned_var = 0; owned_var < owned_variables_per_rank_[my_rank]; owned_var++) {
    if (std::abs(global_values_expect[owned_offsets_[my_rank] + owned_var] - owned_data[owned_var]) >= 1.e-8) {
      std::cout << "owned_var mismatch on " << my_rank << " (" << owned_var
                << ") : " << global_values_expect[owned_offsets_[my_rank] + owned_var] << " "
                << owned_data[owned_var] << std::endl;
    }
  }

  // Map each owned variable id to its first occurrence in this rank's local variable list
  std::vector<std::size_t> index_map;
  for (auto id : comm_pattern.owned_variable_list) {
    index_map.push_back(global_reduced_map_to_local[id][0]);
  }

  std::vector<double> local_data(comm_pattern.local_variable_list.size());
  std::vector<double> local_variables(comm_pattern.local_variable_list.size());

  // ... (omitted: the owned values are propagated back into local_variables using
  //      global_reduced_map_to_local and local_data; see ReturnLocalUpdatedVariables below)

  // Check that every local variable now matches its entry in global_values_mod
  for (std::size_t i = 0; i < local_variables.size(); i++) {
    auto it = std::find(global_labels.begin(), global_labels.end(), comm_pattern.local_variable_list[i]);
    auto offset = it - global_labels.begin();
    if (std::abs(global_values_mod[offset] - local_variables[i]) >= 1.e-8) {
      std::cout << "reduced_update mismatch on " << my_rank << " (" << i
                << ") : " << global_values_mod[offset] << " " << local_variables[i] << std::endl;
    }
  }

  MPI_Finalize();
  return 0;
}
Functions referenced in this listing:

int getRank(MPI_Comm comm=MPI_COMM_WORLD)
Gets the rank of the calling process on the communicator.

auto inverseMap(T &vector_map)
Inverts a vector that provides a map into an unordered_map.

V concatGlobalVector(typename V::size_type global_size, std::vector< int > &variables_per_rank, std::vector< int > &offsets, V &local_vector, bool gatherAll=true, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Assembles a vector by concatenation of local_vector across all ranks on a communicator.

ValuesType ReturnLocalUpdatedVariables(utility::RankCommunication< T > &info, I &global_ids_to_local, ValuesType &reduced_values)
Generates the update method to propagate owned local variables back to local variables in parallel...

void accessPermuteStore(T &vector, M &map, T &results)
Retrieves values from T and stores them via the permuted mapping M: results[M[i]] = T[i].

std::vector< T > buildInclusiveOffsets(std::vector< T > &values_per_rank)
Takes sizes per index and computes rank-local inclusive offsets.

int Scatterv(T &sendbuf, std::vector< int > &variables_per_rank, std::vector< int > &offsets, T &recvbuff, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
MPI_Scatterv on standard library containers; sends each rank only its portion of sendbuf.
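The listing above omits the lines that establish my_rank, owned_offsets_, global_labels, and global_reduced_map_to_local, as well as the step that carries the owned values back into local_variables. The sketch below shows one way those gaps could be filled with the helpers documented here, reusing the variables from the listing; the op::mpi and op::utility namespace qualifications, the comm_pattern.rank_communication member, and the exact call sequence are assumptions for illustration, not lines from the original source.

// Sketch only: namespaces, the rank_communication member, and the call order are assumed.
int my_rank = op::mpi::getRank();  // rank on MPI_COMM_WORLD

// Inclusive per-rank offsets into the concatenated global vector (one entry per rank plus the total)
auto owned_offsets_ = op::utility::buildInclusiveOffsets(owned_variables_per_rank_);

// Every rank's owned variable ids concatenated in rank order
auto global_labels = op::utility::parallel::concatGlobalVector(global_size, owned_variables_per_rank_,
                                                               owned_offsets_, comm_pattern.owned_variable_list);

// Invert the local variable list: variable id -> local indices where that id appears
auto global_reduced_map_to_local = op::utility::inverseMap(comm_pattern.local_variable_list);

// Drop this rank's owned values into their local slots (local_data[index_map[i]] = owned_data[i]) ...
op::utility::accessPermuteStore(owned_data, index_map, local_data);

// ... then propagate values for local variables owned by other ranks
local_variables = op::ReturnLocalUpdatedVariables(comm_pattern.rank_communication,
                                                  global_reduced_map_to_local, local_data);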