OP  0.1
OP is an optimization solver plugin package
 All Classes Namespaces Functions Variables Typedefs Enumerations Friends
nlopt_op.hpp
1 #pragma once
2 #include "op.hpp"
3 #include <nlopt.hpp>
4 #include "op_waitloop.hpp"
5 
7 namespace op {
8 
// forward declaration of NLoptFunctional: the nlopt-style callback that evaluates an
// op::Functional (objective or constraint) and, when grad is non-empty, its gradient
template <typename T>
double NLoptFunctional(const std::vector<double>& x, std::vector<double>& grad, void* objective_and_optimizer);

// forward declarations for NLopt class
template <typename T>
class NLopt;
16 
17 // detail namespace
18 namespace detail {
20 template <typename T>
22  op::Functional obj; // we don't use a reference here incase we need to wrap obj
23  op::NLopt<T>& nlopt;
24  int state; // nlopt obj or constraint
25  double constraint_tol = 0.;
26  double constraint_val = 0.;
27  bool lower_bound = false;
28 };
29 
30 } // namespace detail
31 
/// Default nlopt index bookkeeping type: a list of global variable indices
using nlopt_index_type = std::vector<std::size_t>;
34 
40 auto wrapNLoptFunc(std::function<double(unsigned, const double*, double*, void*)> func)
41 {
42  auto obj_eval = [=](const std::vector<double>& x) {
43  return func(static_cast<unsigned int>(x.size()), x.data(), nullptr, nullptr);
44  };
45 
46  auto obj_grad = [=](const std::vector<double>& x) {
47  std::vector<double> grad(x.size());
48  func(static_cast<unsigned int>(x.size()), x.data(), grad.data(), nullptr);
49  return grad;
50  };
51  return std::make_tuple<op::Functional::EvalObjectiveFn, op::Functional::EvalObjectiveGradFn>(obj_eval, obj_grad);
52 }
53 
/// Options specific for nlopt. They are made to look like ipopt's interface.
struct NLoptOptions {
  std::unordered_map<std::string, int> Int;             // integer options (e.g. "maxeval")
  std::unordered_map<std::string, double> Double;       // double options (e.g. "xtol_rel", "constraint_tol")
  std::unordered_map<std::string, std::string> String;  // string options
  nlopt::algorithm algorithm = nlopt::LD_MMA;           // nlopt algorithm; defaults to MMA
};
61 
63 template <typename T = nlopt_index_type>
64 class NLopt : public op::Optimizer {
65 public:
67  explicit NLopt(op::Vector<std::vector<double>>& variables, NLoptOptions& o, std::optional<MPI_Comm> comm = {},
68  std::optional<op::utility::CommPattern<T>> comm_pattern_info = {})
69  : comm_(comm.has_value() ? comm.value() : MPI_COMM_NULL),
70  global_variables_(0),
71  variables_(variables),
72  options_(o),
73  comm_pattern_(comm_pattern_info),
74  global_reduced_map_to_local_({}),
75  num_local_owned_variables_(0)
76  {
77  std::cout << "NLOpt wrapper constructed" << std::endl;
78 
79  // if we want to run nlopt in "serial" mode, set rank = 0, otherwise get the rank in the communicator
80  auto rank = comm_ != MPI_COMM_NULL ? op::mpi::getRank(comm_) : 0;
81 
82  // Since this optimizer runs in serial we need to figure out the global number of decisionvariables
83  // in "advanced" mode some of the local_variables may not be unique to each partition
84  if (comm_pattern_.has_value()) {
85  // figure out what optimization variables we actually own
86  auto& comm_pattern = comm_pattern_.value();
87  num_local_owned_variables_ = comm_pattern.owned_variable_list.size();
88  // produce label to index map on each rank
89  global_reduced_map_to_local_ = op::utility::inverseMap(comm_pattern.local_variable_list);
90  } else {
91  num_local_owned_variables_ = variables.data().size();
92  }
93 
94  // in "simple" mode the local_variables are unique to each partition, but by the time
95  // we get here both the "advanced" and "simple" pattern is the same for calculating
96  // the actual optimization problem size
97  if (comm_ != MPI_COMM_NULL) {
98  auto [global_size, variables_per_rank] =
99  op::utility::parallel::gatherVariablesPerRank<int>(num_local_owned_variables_);
100  owned_variables_per_rank_ = variables_per_rank;
101  owned_offsets_ = op::utility::buildInclusiveOffsets(owned_variables_per_rank_);
102 
103  if (rank == root_rank_) {
104  global_variables_.resize(static_cast<std::size_t>(global_size));
105  }
106  } else {
107  global_variables_.resize(variables_.data().size());
108  }
109 
110  // Create nlopt optimizer
111  nlopt_ = std::make_unique<nlopt::opt>(options_.algorithm, global_variables_.size());
112 
113  // Set variable bounds
114  auto lowerBounds = variables.lowerBounds();
115  auto upperBounds = variables.upperBounds();
116 
117  // Adjust in "advanced" mode
118  if (isAdvanced()) {
119  auto& reduced_variable_list = comm_pattern_.value().owned_variable_list;
120  lowerBounds =
121  op::utility::permuteMapAccessStore(lowerBounds, reduced_variable_list, global_reduced_map_to_local_.value());
122  upperBounds =
123  op::utility::permuteMapAccessStore(upperBounds, reduced_variable_list, global_reduced_map_to_local_.value());
124  }
125 
126  // save initial set of variables to detect if variables changed
127  // set previous_variables to make the check
128  if (isAdvanced()) {
129  auto& reduced_variable_list = comm_pattern_.value().owned_variable_list;
130  auto reduced_previous_local_variables = op::utility::permuteMapAccessStore(
131  variables.data(), reduced_variable_list, global_reduced_map_to_local_.value());
132  previous_variables_ =
133  op::utility::parallel::concatGlobalVector(global_variables_.size(), owned_variables_per_rank_, owned_offsets_,
134  reduced_previous_local_variables, false); // gather on rank 0
135 
136  } else if (comm_ != MPI_COMM_NULL) {
137  // in this case everyone owns all of their variables
138  previous_variables_ =
139  op::utility::parallel::concatGlobalVector(global_variables_.size(), owned_variables_per_rank_, owned_offsets_,
140  variables.data(), false); // gather on rank 0
141  } else {
142  // "serial" case
143  previous_variables_ = variables.data();
144  }
145 
146  // initialize our starting global variables
147  global_variables_ = previous_variables_;
148 
149  if (comm_ != MPI_COMM_NULL) {
150  auto global_lower_bounds = op::utility::parallel::concatGlobalVector(
151  global_variables_.size(), owned_variables_per_rank_, owned_offsets_, lowerBounds, false); // gather on rank 0
152 
153  auto global_upper_bounds = op::utility::parallel::concatGlobalVector(
154  global_variables_.size(), owned_variables_per_rank_, owned_offsets_, upperBounds, false); // gather on rank 0
155  if (rank == root_rank_) {
156  nlopt_->set_lower_bounds(global_lower_bounds);
157  nlopt_->set_upper_bounds(global_upper_bounds);
158  }
159  } else {
160  // in the serial case we know this rank is root already
161  nlopt_->set_lower_bounds(lowerBounds);
162  nlopt_->set_upper_bounds(upperBounds);
163  }
164 
165  // Optimizer settings only need to be set on the root rank as it runs the actual NLopt optimizer
166  if (rank == root_rank_) {
167  // Process Integer options
168  if (o.Int.find("maxeval") != o.Int.end()) nlopt_->set_maxeval(o.Int["maxeval"]);
169 
170  // Process Double options
171  for (auto [optname, optval] : options_.Double) {
172  if (optname == "xtol_rel") {
173  nlopt_->set_xtol_rel(o.Double["xtol_rel"]); // various tolerance stuff ;)
174  } else {
175  nlopt_->set_param(optname.c_str(), optval);
176  }
177  }
178 
179  // Check if constraint_tol key exists in options.Double
180  if (options_.Double.find("constraint_tol") == options_.Double.end()) {
181  options_.Double["constraint_tol"] = 0.;
182  }
183  }
184 
185  // Create the go routine to start the optimization
186 
187  if (rank == root_rank_) {
188  // The root_rank uses nlopt to perform the optimization.
189  // Within NLoptFunctional there is a branch condition for the root_rank which broadcasts
190  // the current state to all non-root ranks.
191 
192  go.onGo([&]() {
193  nlopt_->set_min_objective(NLoptFunctional<T>, &obj_info_[0]);
194  for (auto& constraint : constraints_info_) {
195  nlopt_->add_inequality_constraint(NLoptFunctional<T>, &constraint, constraint.constraint_tol);
196  }
197 
198  nlopt_->optimize(global_variables_, final_obj);
199 
200  // propagate solution objective to all ranks
201  if (comm_ != MPI_COMM_NULL) {
202  std::vector<int> state{op::State::SOLUTION_FOUND};
203  op::mpi::Broadcast(state, 0, comm_);
204 
205  std::vector<double> obj(1, final_obj);
206  op::mpi::Broadcast(obj, 0, comm_);
207  }
208  }
209 
210  );
211  } else {
212  // Non-root ranks go into a wait loop where they wait to recieve the current state
213  // If evaluation calls are made `NLoptFunctional` is called
214 
215  waitloop_ = std::make_unique<WaitLoop>([&]() { return constraints_info_.size(); }, final_obj, comm_);
216 
217  // Add actions customized for NLopt to the waitloop-Fluent pattern
218 
219  (*waitloop_)
220  .onUpdate([&]() {
221  // recieve the incoming variables
222  std::vector<double> owned_data(num_local_owned_variables_);
223  std::vector<double> empty;
224  op::mpi::Scatterv(empty, owned_variables_per_rank_, owned_offsets_, owned_data, 0, comm_);
225  if (comm_pattern_.has_value()) {
226  // repropagate back to non-owning ranks
227  std::vector<double> local_data(variables_.data().size());
228  auto& owned_variable_list = comm_pattern_.value().owned_variable_list;
229 
230  // TODO: improve fix during refactor
231  std::vector<typename T::value_type> index_map;
232  for (auto id : owned_variable_list) {
233  index_map.push_back(global_reduced_map_to_local_.value()[id][0]);
234  }
235 
236  op::utility::accessPermuteStore(owned_data, index_map, local_data);
237 
238  variables_.data() = op::ReturnLocalUpdatedVariables(comm_pattern_.value().rank_communication,
239  global_reduced_map_to_local_.value(), local_data);
240  } else {
241  variables_.data() = owned_data;
242  }
243 
245  })
246  .onObjectiveGrad(
247  // obj_grad
248  [&]() {
249  // Implementation Note: See NLoptFunctional<T> about why grad is forced to be of size 1
250  std::vector<double> grad(variables_.data().size() > 0 ? variables_.data().size() : 1);
251  // Call NLoptFunctional on non-root-rank
252  NLoptFunctional<T>(variables_.data(), grad, &obj_info_[0]);
253  })
254  .onObjectiveEval(
255  // obj_Eval
256  [&]() {
257  std::vector<double> grad;
258  // Call NLoptFunctional on non-root-rank
259  NLoptFunctional<T>(variables_.data(), grad, &obj_info_[0]);
260  })
261  .onConstraintsEval(
262  // constraint states
263  [&](int state) {
264  // just an eval routine
265  std::vector<double> grad;
266  // Call NLoptFunctional on non-root-rank
267  NLoptFunctional<T>(variables_.data(), grad, &constraints_info_[static_cast<std::size_t>(state)]);
268  })
269  .onConstraintsGrad(
270  // constraint grad states
271  [&](int state) {
272  // this is a constraint gradient call
273  // Implementation Note: See NLoptFunctional<T> about why grad is forced to be of size 1
274  std::vector<double> grad(variables_.data().size() > 0 ? variables_.data().size() : 1);
275  // Call NLoptFunctional on non-root-rank
276  NLoptFunctional<T>(variables_.data(), grad, &constraints_info_[static_cast<std::size_t>(state)]);
277  })
278  .onSolution( // Solution state
279  [&]() {
280  // The solution has been found.. recieve the objective
281  std::vector<double> obj(1);
282  op::mpi::Broadcast(obj, 0, comm_);
283  final_obj = obj[0];
284  // exit this loop finally!
285  })
286  .onUnknown( // unknown state
287  [&](int state) {
288  // this is an unknown state!
289  std::cout << "Unknown State: " << state << std::endl;
290  MPI_Abort(comm_, state);
291  });
292 
293  // Set the Go function to use the waitloop functor we've just created
294  go.onGo([&]() { (*waitloop_)(); });
295  }
296  }
297 
298  void setObjective(op::Functional& o) override
299  {
300  obj_info_.clear();
301  obj_info_.emplace_back(
302  op::detail::FunctionalInfo<T>{.obj = o, .nlopt = *this, .state = State::OBJ_EVAL, .constraint_tol = 0.});
303  }
304 
305  void addConstraint(op::Functional& o) override
306  {
307  if (o.upper_bound != op::Functional::default_max) {
308  constraints_info_.emplace_back(op::detail::FunctionalInfo<T>{.obj = o,
309  .nlopt = *this,
310  .state = static_cast<int>(constraints_info_.size()),
311  .constraint_tol = options_.Double["constraint_tol"],
312  .constraint_val = o.upper_bound,
313  .lower_bound = false});
314  }
315  if (o.lower_bound != op::Functional::default_min) {
316  constraints_info_.emplace_back(op::detail::FunctionalInfo<T>{.obj = o,
317  .nlopt = *this,
318  .state = static_cast<int>(constraints_info_.size()),
319  .constraint_tol = options_.Double["constraint_tol"],
320  .constraint_val = o.lower_bound,
321  .lower_bound = true});
322  }
323  }
324 
331  bool variables_changed(const std::vector<double>& x)
332  {
333  assert(x.size() == previous_variables_.size());
334  for (std::size_t i = 0; i < x.size(); i++) {
335  if (previous_variables_[i] != x[i]) {
336  std::copy(x.begin(), x.end(), previous_variables_.begin());
337  return true;
338  }
339  }
340  return false;
341  }
342 
346  bool isAdvanced() { return comm_pattern_.has_value(); }
347 
352  std::function<std::vector<double>(const std::vector<double>&)> local_grad_func,
353  std::function<double(const std::vector<double>&)> local_reduce_func)
354  {
355  assert(comm_pattern_.has_value());
357  comm_pattern_.value().rank_communication, global_reduced_map_to_local_.value(),
358  comm_pattern_.value().owned_variable_list, local_grad_func, local_reduce_func, comm_);
359  }
360 
361 protected:
362  MPI_Comm comm_;
363  std::vector<double> global_variables_;
364  op::Vector<std::vector<double>>& variables_;
365 
366  std::unique_ptr<nlopt::opt> nlopt_;
367  NLoptOptions& options_;
368 
369  std::vector<double> previous_variables_;
370 
371  std::vector<detail::FunctionalInfo<T>> obj_info_;
372  std::vector<detail::FunctionalInfo<T>> constraints_info_;
373 
374  std::vector<int> owned_variables_per_rank_; // this needs to be `int` to satisify MPI
375  std::vector<int> owned_offsets_; // this needs to be `int` to satisfy MPI
376 
377  std::optional<utility::CommPattern<T>> comm_pattern_;
378 
379  std::optional<std::unordered_map<typename T::value_type, T>> global_reduced_map_to_local_;
380 
381  friend double NLoptFunctional<T>(const std::vector<double>& x, std::vector<double>& grad,
382  void* objective_and_optimizer);
383 
384  std::size_t num_local_owned_variables_;
385  int root_rank_ = 0;
386  std::unique_ptr<WaitLoop> waitloop_;
387 };
388 // end NLopt implementation
389 
// template deduction guide: deduce NLopt<T> from the CommPattern<T> argument
template <typename T>
NLopt(op::Vector<std::vector<double>>, NLoptOptions&, MPI_Comm, utility::CommPattern<T>) -> NLopt<T>;
393 
/**
 * @brief Takes in a op::Functional and computes the objective function and its gradient
 * as an nlopt function
 *
 * The void* payload is a op::detail::FunctionalInfo<T>, carrying both the functional
 * and the owning NLopt optimizer. On the root rank this also broadcasts the current
 * state (variable update / eval / grad) to the non-root ranks' wait loops.
 *
 * @param[in] x global optimization variables (meaningful on the root rank in parallel)
 * @param[in,out] grad gradient buffer; grad.size() > 0 signals a gradient request
 * @param[in] objective_and_optimizer pointer to a op::detail::FunctionalInfo<T>
 * @return objective value, or the shifted (and for lower bounds, negated) constraint value
 */
template <typename T>
double NLoptFunctional(const std::vector<double>& x, std::vector<double>& grad, void* objective_and_optimizer)
{
  auto info = static_cast<op::detail::FunctionalInfo<T>*>(objective_and_optimizer);
  auto& optimizer = info->nlopt;
  auto& objective = info->obj;

  // check if the optimizer is running in "serial" or "parallel"
  if (optimizer.comm_ == MPI_COMM_NULL) {
    // the optimizer is running in serial
    if (optimizer.variables_changed(x)) {
      optimizer.variables_.data() = x;
      optimizer.UpdatedVariableCallback();
    }
  } else {
    // the optimizer is running in parallel
    auto rank = op::mpi::getRank(optimizer.comm_);

    if (rank == optimizer.root_rank_) {
      if (optimizer.variables_changed(x)) {
        // first thing to do is broadcast the state so non-root ranks enter their onUpdate action
        std::vector<int> state{op::State::UPDATE_VARIABLES};
        op::mpi::Broadcast(state, 0, optimizer.comm_);

        // have the root rank scatter variables back to "owning nodes"
        std::vector<double> x_temp(x.begin(), x.end());
        std::vector<double> new_data(optimizer.comm_pattern_.has_value()
                                         ? optimizer.comm_pattern_.value().owned_variable_list.size()
                                         : optimizer.variables_.data().size());
        op::mpi::Scatterv(x_temp, optimizer.owned_variables_per_rank_, optimizer.owned_offsets_, new_data, 0,
                          optimizer.comm_);

        if (optimizer.comm_pattern_.has_value()) {
          // repropagate back to non-owning ranks
          std::vector<double> local_data(optimizer.variables_.data().size());
          auto& owned_variable_list = optimizer.comm_pattern_.value().owned_variable_list;

          // TODO: improve fix during refactor
          std::vector<typename T::value_type> index_map;
          for (auto id : owned_variable_list) {
            index_map.push_back(optimizer.global_reduced_map_to_local_.value()[id][0]);
          }
          op::utility::accessPermuteStore(new_data, index_map, local_data);

          optimizer.variables_.data() =
              op::ReturnLocalUpdatedVariables(optimizer.comm_pattern_.value().rank_communication,
                                              optimizer.global_reduced_map_to_local_.value(), new_data);
        } else {
          optimizer.variables_.data() = new_data;
        }

        optimizer.UpdatedVariableCallback();
      }

      // Next check if gradient needs to be called
      if (grad.size() > 0) {
        // check to see if it's an objective or constraint
        // (constraint-gradient states are encoded as constraint index + number of constraints)
        std::vector<int> state{info->state < 0 ? op::State::OBJ_GRAD
                                               : info->state + static_cast<int>(optimizer.constraints_info_.size())};
        op::mpi::Broadcast(state, 0, optimizer.comm_);
      } else {
        // just eval routine
        // check to see if it's an objective or constraint
        std::vector<int> state(1, info->state < 0 ? op::State::OBJ_EVAL : info->state);
        op::mpi::Broadcast(state, 0, optimizer.comm_);
      }
    }
  }

  // At this point the non-root ranks are also calling this method, and all ranks get here.
  // Implementation Note:
  // Serial nlopt just checks whether grad is empty to distinguish eval from gradient calls.
  // In our parallel case a rank may own no optimization variables yet still be needed for the
  // forward solves, and concatGlobalVector must be called by all participating ranks.
  // FunctionalInfo<T> was designed to be relatively simple and does not itself distinguish
  // eval from gradient calls.
  // tldr; we force grad to be at least size 1 on callers even for ownerless ranks, so the
  // gradient reduction below always happens collectively — a deliberately simple parallelism
  // model for the nlopt_op implementation.

  if (grad.size() > 0) {
    auto owned_grad = objective.EvalGradient(optimizer.variables_.data());

    // check if "serial" or parallel. If we're running in serial we're already done.
    if (optimizer.comm_ != MPI_COMM_NULL) {
      grad = op::utility::parallel::concatGlobalVector(static_cast<std::size_t>(optimizer.owned_offsets_.back()),
                                                       optimizer.owned_variables_per_rank_, optimizer.owned_offsets_,
                                                       owned_grad, false, 0, optimizer.comm_);  // gather on rank 0
    } else {
      grad = owned_grad;
    }

    // check if this is a lower_bound constraint and negate gradient
    // (lower bounds are rewritten as -(g(x) - v) <= 0 to fit nlopt's inequality form)
    if (info->state >= 0 && info->lower_bound) {
      for (auto& g : grad) {
        g *= -1.;
      }
    }
  }

  // modify objective evaluation just for constraints
  if (info->state >= 0) {
    if (info->lower_bound) {
      // lower-bound constraint: negate the shifted value so nlopt sees it as <= 0
      return -(objective.Eval(optimizer.variables_.data()) - info->constraint_val);
    }
    // upper-bound constraint: shifted value g(x) - upper <= 0
    return objective.Eval(optimizer.variables_.data()) - info->constraint_val;
  }

  return objective.Eval(optimizer.variables_.data());
}
523 
524 } // namespace op
Go & onGo(const CallbackFn &go)
Define Go action.
Definition: op.hpp:36
int getRank(MPI_Comm comm=MPI_COMM_WORLD)
Get rank.
Definition: op_mpi.hpp:50
double final_obj
final objective value
Definition: op.hpp:221
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Broadcast(T &buf, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast a single element to all ranks on the communicator.
Definition: op_mpi.hpp:106
Abstracted Optimization Vector container.
Definition: op.hpp:80
auto inverseMap(T &vector_map)
Inverts a vector that provides a map into an unordered_map.
Definition: op_utility.hpp:417
Abstracted Optimizer implementation.
Definition: op.hpp:165
V concatGlobalVector(typename V::size_type global_size, std::vector< int > &variables_per_rank, std::vector< int > &offsets, V &local_vector, bool gatherAll=true, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Assemble a vector by concatenation of local_vector across all ranks on a communicator.
Definition: op_utility.hpp:99
op::Go go
Go function to start optimization.
Definition: op.hpp:209
auto generateReducedLocalGradientFunction(std::function< std::vector< double >(const std::vector< double > &)> local_grad_func, std::function< double(const std::vector< double > &)> local_reduce_func)
generates reduced local gradient using comm_pattern_
Definition: nlopt_op.hpp:351
bool variables_changed(const std::vector< double > &x)
Method to see if variables changed, if they have set new x.
Definition: nlopt_op.hpp:331
ValuesType ReturnLocalUpdatedVariables(utility::RankCommunication< T > &info, I &global_ids_to_local, ValuesType &reduced_values)
Generate update method to propagate owned local variables back to local variables in parallel...
Definition: op.hpp:313
VectorType & data()
Get the underlying data.
Definition: op.hpp:96
auto OwnedLocalObjectiveGradientFunction(utility::RankCommunication< T > &info, I &global_ids_to_local, T &reduced_id_list, std::function< std::vector< double >(const std::vector< double > &)> local_obj_grad_func, std::function< double(const std::vector< double > &)> local_reduce_func, MPI_Comm comm=MPI_COMM_WORLD)
Generate an objective gradient function that takes local variables and reduces them in parallel to lo...
Definition: op.hpp:283
double upper_bound
Upper bounds for this optimization functional.
Definition: op.hpp:157
bool isAdvanced()
returns whether NLopt is in &quot;advanced&quot; mode or not
Definition: nlopt_op.hpp:346
void accessPermuteStore(T &vector, M &map, T &results)
Retrieves from T and stores in permuted mapping M, result[M[i]] = T[i].
Definition: op_utility.hpp:303
Options specific for nlopt. They are made to look like ipopt's interface.
Definition: nlopt_op.hpp:55
void setObjective(op::Functional &o) override
Sets the optimization objective.
Definition: nlopt_op.hpp:298
T permuteMapAccessStore(T &vector, M &map, I &global_ids_of_local_vector)
Retrieves from T using a permuted mapping M and index mapping I stores in order, result[i] = T[I[M[i]...
Definition: op_utility.hpp:390
Container to pass objective and optimizer.
Definition: nlopt_op.hpp:21
std::vector< T > buildInclusiveOffsets(std::vector< T > &values_per_rank)
Takes in sizes per index and performs a rank-local inclusive offset.
Definition: op_utility.hpp:43
A op::optimizer implementation for NLopt.
Definition: nlopt_op.hpp:15
double NLoptFunctional(const std::vector< double > &x, std::vector< double > &grad, void *objective_and_optimizer)
Takes in a op::Functional and computes the objective function and its gradient as a nlopt function...
Definition: nlopt_op.hpp:406
Abstracted Objective Functional class.
Definition: op.hpp:118
void addConstraint(op::Functional &o) override
Adds a constraint for the optimization problem.
Definition: nlopt_op.hpp:305
auto wrapNLoptFunc(std::function< double(unsigned, const double *, double *, void *)> func)
wraps any nlopt::function into an objective call and a gradient call
Definition: nlopt_op.hpp:40
std::vector< std::size_t > nlopt_index_type
Default nlopt type.
Definition: nlopt_op.hpp:33
double lower_bound
Lower bounds for this optimization functional.
Definition: op.hpp:154
void UpdatedVariableCallback()
What to do when the variables are updated.
Definition: op.hpp:194
NLopt(op::Vector< std::vector< double >> &variables, NLoptOptions &o, std::optional< MPI_Comm > comm={}, std::optional< op::utility::CommPattern< T >> comm_pattern_info={})
Constructor for our optimizer.
Definition: nlopt_op.hpp:67
int Scatterv(T &sendbuf, std::vector< int > &variables_per_rank, std::vector< int > &offsets, T &recvbuff, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
MPI_Scatterv on std::collections. Send only portions of buff to ranks.
Definition: op_mpi.hpp:175