// (other includes in the original header are elided in this listing)
#include <mpi.h>
#include <nlopt.hpp>

#include "op_waitloop.hpp"

/// Default nlopt index type
using nlopt_index_type = std::vector<std::size_t>;

/// Forward declaration of the nlopt objective/constraint adapter
template <typename T>
double NLoptFunctional(const std::vector<double>& x, std::vector<double>& grad,
                       void* objective_and_optimizer);
// detail::FunctionalInfo<T> is the container handed to NLoptFunctional; it
// bundles references to the op::Functional (obj), the NLopt optimizer (nlopt),
// and the constraint index (state) with the bookkeeping below (the other
// member declarations are elided in this listing):
double constraint_tol = 0.;
double constraint_val = 0.;
bool lower_bound = false;
/// Wraps an nlopt-style function into an objective call and a gradient call
auto wrapNLoptFunc(std::function<double(unsigned, const double*, double*, void*)> func)
{
  auto obj_eval = [=](const std::vector<double>& x) {
    return func(static_cast<unsigned int>(x.size()), x.data(), nullptr, nullptr);
  };

  auto obj_grad = [=](const std::vector<double>& x) {
    std::vector<double> grad(x.size());
    func(static_cast<unsigned int>(x.size()), x.data(), grad.data(), nullptr);
    return grad;
  };

  return std::make_tuple<op::Functional::EvalObjectiveFn, op::Functional::EvalObjectiveGradFn>(obj_eval, obj_grad);
}
/// Options specific for nlopt; they are made to look like ipopt's interface
struct NLoptOptions {
  std::unordered_map<std::string, int> Int;
  std::unordered_map<std::string, double> Double;
  std::unordered_map<std::string, std::string> String;
  nlopt::algorithm algorithm = nlopt::LD_MMA;
};
/// An op::Optimizer implementation for NLopt
template <typename T = nlopt_index_type>
class NLopt : public op::Optimizer {
public:
  /// Constructor for our optimizer
  NLopt(op::Vector<std::vector<double>>& variables, NLoptOptions& o,
        std::optional<MPI_Comm> comm = {},
        std::optional<op::utility::CommPattern<T>> comm_pattern_info = {})
      : comm_(comm.has_value() ? comm.value() : MPI_COMM_NULL),
        variables_(variables),
        options_(o),
        comm_pattern_(comm_pattern_info),
        global_reduced_map_to_local_({}),
        num_local_owned_variables_(0)
  {
    std::cout << "NLOpt wrapper constructed" << std::endl;
    // Determine how many local variables this rank owns
    if (comm_pattern_.has_value()) {
      auto& comm_pattern = comm_pattern_.value();
      num_local_owned_variables_ = comm_pattern.owned_variable_list.size();
      // ... (build global_reduced_map_to_local_ from the comm pattern)
    } else {
      num_local_owned_variables_ = variables.data().size();
    }
    // Rank of this process (treated as the root when running without MPI)
    auto rank = comm_ != MPI_COMM_NULL ? op::mpi::getRank(comm_) : root_rank_;

    // Gather the number of owned variables on each rank and compute offsets
    if (comm_ != MPI_COMM_NULL) {
      auto [global_size, variables_per_rank] =
          op::utility::parallel::gatherVariablesPerRank<int>(num_local_owned_variables_);
      owned_variables_per_rank_ = variables_per_rank;
      owned_offsets_ = op::utility::buildInclusiveOffsets(owned_variables_per_rank_);

      if (rank == root_rank_) {
        global_variables_.resize(static_cast<std::size_t>(global_size));
      }
    } else {
      global_variables_.resize(variables_.data().size());
    }

    // Create the nlopt optimizer over the global set of design variables
    nlopt_ = std::make_unique<nlopt::opt>(options_.algorithm, global_variables_.size());
    // Set variable bounds
    auto lowerBounds = variables.lowerBounds();
    auto upperBounds = variables.upperBounds();

    if (comm_pattern_.has_value()) {
      auto& reduced_variable_list = comm_pattern_.value().owned_variable_list;
      // ... (restrict lowerBounds and upperBounds to the owned variables)
    }

    // Save the initial variable state so later changes can be detected
    if (comm_pattern_.has_value()) {
      // gather the owned local variables into the global vector on the root
      auto& reduced_variable_list = comm_pattern_.value().owned_variable_list;
      auto reduced_previous_local_variables = op::utility::permuteMapAccessStore(
          variables.data(), reduced_variable_list, global_reduced_map_to_local_.value());
      previous_variables_ = op::utility::parallel::concatGlobalVector(
          global_variables_.size(), owned_variables_per_rank_, owned_offsets_,
          reduced_previous_local_variables, false);  // gatherAll = false
    } else if (comm_ != MPI_COMM_NULL) {
      previous_variables_ = op::utility::parallel::concatGlobalVector(
          global_variables_.size(), owned_variables_per_rank_, owned_offsets_,
          variables.data(), false);  // gatherAll = false
    } else {
      // serial case
      previous_variables_ = variables.data();
    }
    global_variables_ = previous_variables_;
    // Assemble the bounds on the root rank and hand them to nlopt
    if (comm_ != MPI_COMM_NULL) {
      auto global_lower_bounds = op::utility::parallel::concatGlobalVector(
          global_variables_.size(), owned_variables_per_rank_, owned_offsets_, lowerBounds,
          false);  // gatherAll = false
      auto global_upper_bounds = op::utility::parallel::concatGlobalVector(
          global_variables_.size(), owned_variables_per_rank_, owned_offsets_, upperBounds,
          false);  // gatherAll = false
      if (rank == root_rank_) {
        nlopt_->set_lower_bounds(global_lower_bounds);
        nlopt_->set_upper_bounds(global_upper_bounds);
      }
    } else {
      nlopt_->set_lower_bounds(lowerBounds);
      nlopt_->set_upper_bounds(upperBounds);
    }
    // Process relevant options on the root rank
    if (rank == root_rank_) {
      if (o.Int.find("maxeval") != o.Int.end()) nlopt_->set_maxeval(o.Int["maxeval"]);

      for (auto [optname, optval] : options_.Double) {
        if (optname == "xtol_rel") {
          nlopt_->set_xtol_rel(o.Double["xtol_rel"]);
        } else {
          // forward any other double-valued option directly to nlopt
          nlopt_->set_param(optname.c_str(), optval);
        }
      }
    }

    // Fall back to a zero constraint tolerance if none was provided
    if (options_.Double.find("constraint_tol") == options_.Double.end()) {
      options_.Double["constraint_tol"] = 0.;
    }
    // The root rank drives NLopt; all other ranks wait for broadcast requests
    if (rank == root_rank_) {
      // when the optimization is started, register the objective and the
      // constraints with nlopt and run the optimization
      go.onGo([&]() {
        nlopt_->set_min_objective(NLoptFunctional<T>, &obj_info_[0]);
        for (auto& constraint : constraints_info_) {
          nlopt_->add_inequality_constraint(NLoptFunctional<T>, &constraint, constraint.constraint_tol);
        }
        nlopt_->optimize(global_variables_, final_obj);

        // tell the waiting ranks that a solution was found
        if (comm_ != MPI_COMM_NULL) {
          std::vector<int> state{op::State::SOLUTION_FOUND};
          op::mpi::Broadcast(state, 0, comm_);
          // ... (send the final objective value)
        }
      });
    } else {
      // all other ranks service requests from the root rank in a wait loop
      waitloop_ = std::make_unique<WaitLoop>([&]() { return constraints_info_.size(); }, final_obj, comm_);
      // Bodies of the handlers this wait loop runs for each broadcast
      // op::State (the handler registration calls are elided in this listing):

      // op::State::UPDATE_VARIABLES: receive this rank's owned portion of the
      // updated design variables and propagate it to the local variables
      {
        std::vector<double> owned_data(num_local_owned_variables_);
        std::vector<double> empty;
        op::mpi::Scatterv(empty, owned_variables_per_rank_, owned_offsets_, owned_data, 0, comm_);

        if (comm_pattern_.has_value()) {
          // place owned values at their local positions, then update ghosts
          std::vector<double> local_data(variables_.data().size());
          auto& owned_variable_list = comm_pattern_.value().owned_variable_list;
          // map each owned global id to its first local index
          std::vector<typename T::value_type> index_map;
          for (auto id : owned_variable_list) {
            index_map.push_back(global_reduced_map_to_local_.value()[id][0]);
          }
          op::utility::accessPermuteStore(owned_data, index_map, local_data);
          variables_.data() = op::ReturnLocalUpdatedVariables(comm_pattern_.value().rank_communication,
                                                              global_reduced_map_to_local_.value(), local_data);
        } else {
          variables_.data() = owned_data;
        }
        UpdatedVariableCallback();
      }

      // op::State::OBJ_GRAD: evaluate the objective gradient
      {
        std::vector<double> grad(variables_.data().size() > 0 ? variables_.data().size() : 1);
        NLoptFunctional<T>(variables_.data(), grad, &obj_info_[0]);
      }

      // op::State::OBJ_EVAL: evaluate the objective only (empty gradient)
      {
        std::vector<double> grad;
        NLoptFunctional<T>(variables_.data(), grad, &obj_info_[0]);
      }

      // constraint evaluation (state is the constraint index delivered by the wait loop)
      {
        std::vector<double> grad;
        NLoptFunctional<T>(variables_.data(), grad, &constraints_info_[static_cast<std::size_t>(state)]);
      }

      // constraint gradient (state is the constraint index delivered by the wait loop)
      {
        std::vector<double> grad(variables_.data().size() > 0 ? variables_.data().size() : 1);
        NLoptFunctional<T>(variables_.data(), grad, &constraints_info_[static_cast<std::size_t>(state)]);
      }

      // op::State::SOLUTION_FOUND: receive the final objective value from the root
      {
        std::vector<double> obj(1);
        op::mpi::Broadcast(obj, 0, comm_);
        // ... (record obj[0] as the final objective)
      }

      // any unknown state aborts the run
      {
        std::cout << "Unknown State: " << state << std::endl;
        MPI_Abort(comm_, state);
      }

      // non-root ranks run the wait loop when the optimization is started
      go.onGo([&]() { (*waitloop_)(); });
    }
  }
  /// Sets the optimization objective (an objective is marked by state = -1)
  void setObjective(op::Functional& o) override
  {
    obj_info_.emplace_back(
        detail::FunctionalInfo<T>{.obj = o, .nlopt = *this, .state = -1});
  }
  /// Adds a constraint for the optimization problem. NLopt only supports
  /// inequality constraints of the form g(x) <= 0, so a two-sided
  /// op::Functional becomes up to two nlopt constraints.
  void addConstraint(op::Functional& o) override
  {
    if (o.upper_bound != op::Functional::default_max) {
      constraints_info_.emplace_back(detail::FunctionalInfo<T>{
          .obj = o,
          .nlopt = *this,
          .state = static_cast<int>(constraints_info_.size()),
          .constraint_tol = options_.Double["constraint_tol"],
          .constraint_val = o.upper_bound,
          .lower_bound = false});
    }
    if (o.lower_bound != op::Functional::default_min) {
      constraints_info_.emplace_back(detail::FunctionalInfo<T>{
          .obj = o,
          .nlopt = *this,
          .state = static_cast<int>(constraints_info_.size()),
          .constraint_tol = options_.Double["constraint_tol"],
          .constraint_val = o.lower_bound,
          .lower_bound = true});
    }
  }
  /// Method to see if the variables changed; if they have, save the new x
  bool variables_changed(const std::vector<double>& x)
  {
    assert(x.size() == previous_variables_.size());
    for (std::size_t i = 0; i < x.size(); i++) {
      if (previous_variables_[i] != x[i]) {
        std::copy(x.begin(), x.end(), previous_variables_.begin());
        return true;
      }
    }
    return false;
  }
  /// Generates the reduced local gradient using comm_pattern_
  auto generateReducedLocalGradientFunction(
      std::function<std::vector<double>(const std::vector<double>&)> local_grad_func,
      std::function<double(const std::vector<double>&)> local_reduce_func)
  {
    assert(comm_pattern_.has_value());
    return op::OwnedLocalObjectiveGradientFunction(
        comm_pattern_.value().rank_communication, global_reduced_map_to_local_.value(),
        comm_pattern_.value().owned_variable_list, local_grad_func, local_reduce_func, comm_);
  }
protected:
  /// communicator for this optimizer (MPI_COMM_NULL when running serially)
  MPI_Comm comm_;
  /// global design variable vector (assembled on the root rank)
  std::vector<double> global_variables_;
  /// reference to the local design variables
  op::Vector<std::vector<double>>& variables_;

  std::unique_ptr<nlopt::opt> nlopt_;
  NLoptOptions& options_;

  std::vector<double> previous_variables_;

  std::vector<detail::FunctionalInfo<T>> obj_info_;
  std::vector<detail::FunctionalInfo<T>> constraints_info_;

  std::vector<int> owned_variables_per_rank_;
  std::vector<int> owned_offsets_;

  std::optional<utility::CommPattern<T>> comm_pattern_;

  std::optional<std::unordered_map<typename T::value_type, T>> global_reduced_map_to_local_;

  friend double NLoptFunctional<T>(const std::vector<double>& x, std::vector<double>& grad,
                                   void* objective_and_optimizer);

  std::size_t num_local_owned_variables_;
  int root_rank_ = 0;

  std::unique_ptr<WaitLoop> waitloop_;
};
// Deduction guide: supplying a CommPattern<T> selects NLopt<T>
template <typename T>
NLopt(op::Vector<std::vector<double>>, NLoptOptions&, MPI_Comm, utility::CommPattern<T>) -> NLopt<T>;
/// Takes in an op::Functional and computes the objective function and its
/// gradient as an nlopt function
template <typename T>
double NLoptFunctional(const std::vector<double>& x, std::vector<double>& grad,
                       void* objective_and_optimizer)
{
  auto info = static_cast<detail::FunctionalInfo<T>*>(objective_and_optimizer);
  auto& optimizer = info->nlopt;
  auto& objective = info->obj;

  if (optimizer.comm_ == MPI_COMM_NULL) {
    // serial case: update the local variables directly
    if (optimizer.variables_changed(x)) {
      optimizer.variables_.data() = x;
      optimizer.UpdatedVariableCallback();
    }
  } else {
    // parallel case: the root rank broadcasts the next action to the wait loops
    auto rank = op::mpi::getRank(optimizer.comm_);
    if (rank == optimizer.root_rank_) {
      if (optimizer.variables_changed(x)) {
        // tell the waiting ranks that the variables have changed
        std::vector<int> state{op::State::UPDATE_VARIABLES};
        op::mpi::Broadcast(state, 0, optimizer.comm_);

        // scatter each rank's owned portion of the new variables
        std::vector<double> x_temp(x.begin(), x.end());
        std::vector<double> new_data(optimizer.comm_pattern_.has_value()
                                         ? optimizer.comm_pattern_.value().owned_variable_list.size()
                                         : optimizer.variables_.data().size());
        op::mpi::Scatterv(x_temp, optimizer.owned_variables_per_rank_, optimizer.owned_offsets_, new_data, 0,
                          optimizer.comm_);

        if (optimizer.comm_pattern_.has_value()) {
          // propagate the owned values back to the local (possibly ghosted) variables
          std::vector<double> local_data(optimizer.variables_.data().size());
          auto& owned_variable_list = optimizer.comm_pattern_.value().owned_variable_list;
          std::vector<typename T::value_type> index_map;
          for (auto id : owned_variable_list) {
            index_map.push_back(optimizer.global_reduced_map_to_local_.value()[id][0]);
          }
          // ... (local_data/index_map handling elided in this listing)
          optimizer.variables_.data() =
              op::ReturnLocalUpdatedVariables(optimizer.comm_pattern_.value().rank_communication,
                                              optimizer.global_reduced_map_to_local_.value(), new_data);
        } else {
          optimizer.variables_.data() = new_data;
        }
        optimizer.UpdatedVariableCallback();
      }

      // tell the waiting ranks which evaluation to perform next
      if (grad.size() > 0) {
        // gradient requests for constraints are offset by the number of constraints
        std::vector<int> state{info->state < 0 ? op::State::OBJ_GRAD
                                               : info->state + static_cast<int>(optimizer.constraints_info_.size())};
        op::mpi::Broadcast(state, 0, optimizer.comm_);
      } else {
        std::vector<int> state(1, info->state < 0 ? op::State::OBJ_EVAL : info->state);
        op::mpi::Broadcast(state, 0, optimizer.comm_);
      }
    }
  }

  // at this point the design variables are up to date on every rank
  if (grad.size() > 0) {
    auto owned_grad = objective.EvalGradient(optimizer.variables_.data());

    // assemble the global gradient on the root rank
    if (optimizer.comm_ != MPI_COMM_NULL) {
      grad = op::utility::parallel::concatGlobalVector(grad.size(), optimizer.owned_variables_per_rank_,
                                                       optimizer.owned_offsets_, owned_grad,
                                                       false, 0, optimizer.comm_);  // gatherAll = false
    } else {
      grad = owned_grad;
    }

    // flip the sign of the gradient for lower-bound constraints
    if (info->state >= 0 && info->lower_bound) {
      for (auto& g : grad) {
        g *= -1.;
      }
    }
  }

  // nlopt only handles inequality constraints of the form g(x) <= 0
  if (info->state >= 0) {
    if (info->lower_bound) {
      // lower bound: g(x) >= lb is imposed as -(g(x) - lb) <= 0
      return -(objective.Eval(optimizer.variables_.data()) - info->constraint_val);
    }
    // upper bound: g(x) <= ub is imposed as g(x) - ub <= 0
    return objective.Eval(optimizer.variables_.data()) - info->constraint_val;
  }
  return objective.Eval(optimizer.variables_.data());
}
Go & onGo(const CallbackFn &go)
Define Go action.
int getRank(MPI_Comm comm=MPI_COMM_WORLD)
Get rank.
double final_obj
final objective value
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Broadcast(T &buf, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast a single element to all ranks on the communicator.
Abstracted Optimization Vector container.
auto inverseMap(T &vector_map)
Inverts a vector that provides a map into an unordered_map.
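
A worked sketch, assuming inverseMap lives in op::utility and maps each value to the list of indices at which it occurs (consistent with the global_reduced_map_to_local_[id][0] lookups in the listing above):

  std::vector<std::size_t> local_to_global{7, 3, 7};
  auto global_to_local = op::utility::inverseMap(local_to_global);
  // global_to_local[7] == {0, 2}, global_to_local[3] == {1}
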
Abstracted Optimizer implementation.
V concatGlobalVector(typename V::size_type global_size, std::vector< int > &variables_per_rank, std::vector< int > &offsets, V &local_vector, bool gatherAll=true, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Assemble a vector by concatenation of local_vector across all ranks on a communicator.
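
A sketch of gathering two owned values from each of two ranks onto rank 0; the counts and offsets would normally come from gatherVariablesPerRank and buildInclusiveOffsets, and the literal values here are illustrative:

  std::vector<double> local{1.0, 2.0};   // this rank's owned values
  std::vector<int> per_rank{2, 2};
  std::vector<int> offsets{0, 2};
  auto global = op::utility::parallel::concatGlobalVector(
      4, per_rank, offsets, local, false, 0, MPI_COMM_WORLD);  // gatherAll = false
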
op::Go go
Go function to start optimization.
auto generateReducedLocalGradientFunction(std::function< std::vector< double >(const std::vector< double > &)> local_grad_func, std::function< double(const std::vector< double > &)> local_reduce_func)
generates reduced local gradient using comm_pattern_
bool variables_changed(const std::vector< double > &x)
Method to see if variables changed; if they have, save the new x.
ValuesType ReturnLocalUpdatedVariables(utility::RankCommunication< T > &info, I &global_ids_to_local, ValuesType &reduced_values)
Generate an update method to propagate owned local variables back to local variables in parallel.
VectorType & data()
Get the underlying data.
auto OwnedLocalObjectiveGradientFunction(utility::RankCommunication< T > &info, I &global_ids_to_local, T &reduced_id_list, std::function< std::vector< double >(const std::vector< double > &)> local_obj_grad_func, std::function< double(const std::vector< double > &)> local_reduce_func, MPI_Comm comm=MPI_COMM_WORLD)
Generate an objective gradient function that takes local variables and reduces them in parallel to locally owned variables.
double upper_bound
Upper bounds for this optimization functional.
bool isAdvanced()
returns whether NLopt is in "advanced" mode or not
void accessPermuteStore(T &vector, M &map, T &results)
Retrieves from T and stores in permuted mapping M, result[M[i]] = T[i].
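
A worked sketch of the stated semantics (result[M[i]] = T[i]), assuming the op::utility namespace as for the other utilities:

  std::vector<double> values{10., 20., 30.};
  std::vector<std::size_t> map{2, 0, 1};
  std::vector<double> result(3);
  op::utility::accessPermuteStore(values, map, result);
  // result[2] = 10., result[0] = 20., result[1] = 30.  =>  result == {20., 30., 10.}
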
Options specific for nlopt. They are made to look like ipopt's interface.
void setObjective(op::Functional &o) override
Sets the optimization objective.
T permuteMapAccessStore(T &vector, M &map, I &global_ids_of_local_vector)
Retrieves from T using a permuted mapping M and index mapping I, and stores in order: result[i] = T[I[M[i]]].
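
A worked sketch, assuming the index map I stores lists of local indices and the first entry is used (matching the [id][0] lookups in the listing):

  std::vector<double> values{10., 20., 30.};
  std::vector<std::size_t> ids{5, 9};   // M: global ids of interest
  std::unordered_map<std::size_t, std::vector<std::size_t>> I{{5, {2}}, {9, {0}}};
  auto result = op::utility::permuteMapAccessStore(values, ids, I);
  // result == {30., 10.}
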
Container to pass objective and optimizer.
std::vector< T > buildInclusiveOffsets(std::vector< T > &values_per_rank)
Takes in sizes per index and performs a rank-local inclusive offset.
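
A small sketch; the exact layout of the returned offsets is an assumption here (a running prefix that includes the total):

  std::vector<int> counts{3, 2, 4};
  auto offsets = op::utility::buildInclusiveOffsets(counts);
  // assumed result: {0, 3, 5, 9}
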
An op::Optimizer implementation for NLopt.
double NLoptFunctional(const std::vector< double > &x, std::vector< double > &grad, void *objective_and_optimizer)
Takes in an op::Functional and computes the objective function and its gradient as an nlopt function.
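
As the wait-loop handlers above show, the adapter can also be invoked directly; an empty gradient vector signals an evaluation-only request (a sketch reusing x and obj_info_ from the listing):

  std::vector<double> grad;   // empty => no gradient requested
  double f = NLoptFunctional<T>(x, grad, &obj_info_[0]);
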
Abstracted Objective Functional class.
void addConstraint(op::Functional &o) override
Adds a constraint for the optimization problem.
auto wrapNLoptFunc(std::function< double(unsigned, const double *, double *, void *)> func)
Wraps any nlopt-style function into an objective call and a gradient call.
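
A usage sketch; the quadratic myfunc below is hypothetical, but its signature is exactly the nlopt-style one accepted by wrapNLoptFunc:

  // hypothetical raw nlopt-style function: f(x) = sum_i x_i^2, gradient 2x
  double myfunc(unsigned n, const double* x, double* grad, void*)
  {
    double f = 0.;
    for (unsigned i = 0; i < n; i++) {
      if (grad) grad[i] = 2. * x[i];
      f += x[i] * x[i];
    }
    return f;
  }

  // wrap it into the two std::vector-based callbacks
  auto [obj_eval, obj_grad] = wrapNLoptFunc(myfunc);
  std::vector<double> x{1., 2.};
  double f = obj_eval(x);   // 5.0
  auto g = obj_grad(x);     // {2., 4.}
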
std::vector< std::size_t > nlopt_index_type
Default nlopt type.
double lower_bound
Lower bounds for this optimization functional.
void UpdatedVariableCallback()
What to do when the variables are updated.
NLopt(op::Vector< std::vector< double >> &variables, NLoptOptions &o, std::optional< MPI_Comm > comm={}, std::optional< op::utility::CommPattern< T >> comm_pattern_info={})
Constructor for our optimizer.
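
A usage sketch for the serial case; the op::Vector construction and the objective are elided or hypothetical, and a parallel run would additionally pass the MPI communicator and a CommPattern:

  NLoptOptions options;
  options.Int["maxeval"] = 100;
  options.Double["xtol_rel"] = 1.e-6;
  options.algorithm = nlopt::LD_MMA;
  // variables: an op::Vector<std::vector<double>> holding the local design
  // variables and their bounds (construction elided)
  NLopt<> optimizer(variables, options);
  optimizer.setObjective(my_objective);  // my_objective: a hypothetical op::Functional
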
int Scatterv(T &sendbuf, std::vector< int > &variables_per_rank, std::vector< int > &offsets, T &recvbuff, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
MPI_Scatterv on std:: collections. Sends only portions of sendbuf to each rank.
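
A sketch: the root rank sends two values to each of two ranks (the counts and offsets play the same role as owned_variables_per_rank_ and owned_offsets_ in the listing):

  std::vector<double> sendbuf{1., 2., 3., 4.};  // significant on the root rank only
  std::vector<int> counts{2, 2};
  std::vector<int> offsets{0, 2};
  std::vector<double> recvbuf(2);
  op::mpi::Scatterv(sendbuf, counts, offsets, recvbuf, 0, MPI_COMM_WORLD);
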