OP  0.1
OP is an optimization solver plugin package
 All Classes Namespaces Functions Variables Typedefs Enumerations Friends
op.hpp
#pragma once

#include <dlfcn.h>
#include <mpi.h>

#include <algorithm>
#include <cassert>
#include <functional>
#include <iostream>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <vector>

#include "op_utility.hpp"
16 
18 namespace op {
19 
21 using CallbackFn = std::function<void()>;
22 
26 class Go {
27 public:
29  Go& onPreprocess(const CallbackFn& preprocess)
30  {
31  preprocess_ = preprocess;
32  return *this;
33  }
34 
36  Go& onGo(const CallbackFn& go)
37  {
38  go_ = go;
39  return *this;
40  }
41 
42  // Define the operator
43  void operator()()
44  {
45  preprocess_();
46  go_();
47  }
48 
49 protected:
50  CallbackFn go_;
51  CallbackFn preprocess_;
52 };
53 
namespace Variables {

/// Utility class for "converting" between Variables and something else
///
/// Holds a pair of user-supplied conversion functors, one in each direction.
template <class Variables, class FieldType>
class VariableMap {
public:
  using ToTypeFn = std::function<FieldType(Variables&)>;
  using FromTypeFn = std::function<Variables(FieldType&)>;

  /// @param to_fn converts a Variables value into a FieldType
  /// @param from_fn converts a FieldType value back into Variables
  VariableMap(ToTypeFn to_fn, FromTypeFn from_fn) : toType_(to_fn), fromType_(from_fn) {}

  /// Apply the Variables -> FieldType conversion
  FieldType convertFromVariable(Variables& v) { return toType_(v); }

  /// Apply the FieldType -> Variables conversion
  /// BUGFIX: was `fromType(f)` (undeclared name) — the stored functor is fromType_
  Variables convertToVariable(FieldType& f) { return fromType_(f); }

protected:
  ToTypeFn toType_;
  FromTypeFn fromType_;
};
} // namespace Variables
73 
/// Abstracted Optimization Vector container
///
/// Wraps a reference to user-owned data together with bounds callbacks and
/// overridable gather/scatter hooks for parallel use.
template <class VectorType>
class Vector {
public:
  using ScatterFn = std::function<VectorType()>;
  using GatherFn = std::function<VectorType()>;
  using BoundsFn = std::function<VectorType()>;

  /// @param data user-owned variable data; must outlive this Vector
  /// @param lowerBounds callback producing per-variable lower bounds
  /// @param upperBounds callback producing per-variable upper bounds
  Vector(VectorType& data, BoundsFn lowerBounds, BoundsFn upperBounds)
      : lowerBounds_(lowerBounds),
        upperBounds_(upperBounds),
        data_(data),
        // BUGFIX: capture `this` and read the stored reference data_ rather than
        // capturing the constructor parameter `data` by reference — that capture
        // formally dangles once the constructor returns.
        gather([this]() { return data_; }),
        scatter([this]() { return data_; })
  {
  }

  /// Get the underlying data
  VectorType& data() { return data_; }

  /// Get the lower bounds for each local optimization variable
  VectorType lowerBounds() { return lowerBounds_(); }

  /// Get the upper bounds for each local optimization variable
  VectorType upperBounds() { return upperBounds_(); }

protected:
  BoundsFn lowerBounds_;
  BoundsFn upperBounds_;
  VectorType& data_;

public:
  /// Gather data function (defaults to returning a copy of the local data)
  GatherFn gather;

  /// Scatter data function (defaults to returning a copy of the local data)
  ScatterFn scatter;
};
116 
/// Abstracted Objective Functional class
///
/// Bundles an objective-evaluation callback with its gradient callback, plus
/// optional lower/upper bounds on the functional value.
class Functional {
public:
  using ResultType = double;
  using SensitivityType = std::vector<double>;
  using EvalObjectiveFn = std::function<ResultType(const std::vector<double>&)>;
  using EvalObjectiveGradFn = std::function<SensitivityType(const std::vector<double>&)>;

  // "unbounded" defaults for the functional value
  static constexpr double default_min = -std::numeric_limits<double>::max();
  static constexpr double default_max = std::numeric_limits<double>::max();

  /// Objective container
  /// @param obj  callback evaluating the objective for a set of variables
  /// @param grad callback evaluating the objective gradient
  /// @param lb   lower bound for this functional (defaults to unbounded)
  /// @param ub   upper bound for this functional (defaults to unbounded)
  Functional(EvalObjectiveFn obj, EvalObjectiveGradFn grad, double lb = default_min, double ub = default_max)
      : lower_bound(lb), upper_bound(ub), obj_(obj), grad_(grad)
  {
  }

  /// Return the objective evaluation at v
  ResultType Eval(const std::vector<double>& v) { return obj_(v); }

  /// Return the objective gradient evaluation at v
  SensitivityType EvalGradient(const std::vector<double>& v) { return grad_(v); }

  /// Lower bound for this optimization functional
  double lower_bound;

  /// Upper bound for this optimization functional
  double upper_bound;

protected:
  EvalObjectiveFn obj_;
  EvalObjectiveGradFn grad_;
};
163 
165 class Optimizer {
166 public:
168  explicit Optimizer() : update([]() {}), iterate([]() {}), save([]() {}), final_obj(std::numeric_limits<double>::max())
169  {
170  }
171 
172  /* the following methods are needed for different optimizers */
173 
179  virtual void setObjective(Functional& o) = 0;
180 
186  virtual void addConstraint(Functional&) {}
187 
188  /* The following methods are hooks that are different for each optimization problem */
189 
191  void Go() { go(); }
192 
195 
197  virtual double Solution() { return final_obj; }
198 
200  void Iteration() { return iterate(); };
201 
203  void SaveState() { return save(); }
204 
206  virtual ~Optimizer() = default;
207 
210 
213 
216 
219 
221  double final_obj;
222 };
223 
/// Dynamically load an Optimizer plugin
///
/// @param optimizer_path path to the shared library implementing the optimizer
/// @param args arguments forwarded to the plugin's "load_optimizer" entry point
/// @return the loaded optimizer, or nullptr if the library or the symbol cannot be found
template <class OptType, typename... Args>
std::unique_ptr<OptType> PluginOptimizer(std::string optimizer_path, Args&&... args)
{
  // NOTE: the handle is intentionally never dlclose'd — the plugin code must
  // stay mapped for the lifetime of the returned optimizer.
  void* optimizer_plugin = dlopen(optimizer_path.c_str(), RTLD_LAZY);

  if (!optimizer_plugin) {
    // BUGFIX: report load failures on stderr rather than stdout
    std::cerr << dlerror() << std::endl;
    return nullptr;
  }

  auto load_optimizer =
      reinterpret_cast<std::unique_ptr<OptType> (*)(Args...)>(dlsym(optimizer_plugin, "load_optimizer"));
  if (load_optimizer) {
    return load_optimizer(std::forward<Args>(args)...);
  }
  return nullptr;
}
248 
256 template <typename V, typename T>
257 auto ReduceObjectiveFunction(const std::function<V(T)>& local_func, MPI_Op op, MPI_Comm comm = MPI_COMM_WORLD)
258 {
259  return [=](T variables) {
260  V local_sum = local_func(variables);
261  V global_sum;
262  auto error = op::mpi::Allreduce(local_sum, global_sum, op, comm);
263  if (error != MPI_SUCCESS) {
264  std::cout << "MPI_Error" << __FILE__ << __LINE__ << std::endl;
265  }
266  return global_sum;
267  };
268 }
269 
282 template <typename T, typename I>
284  utility::RankCommunication<T>& info, I& global_ids_to_local, T& reduced_id_list,
285  std::function<std::vector<double>(const std::vector<double>&)> local_obj_grad_func,
286  std::function<double(const std::vector<double>&)> local_reduce_func, MPI_Comm comm = MPI_COMM_WORLD)
287 {
288  return [=, &info, &global_ids_to_local](const std::vector<double>& local_variables) {
289  // First we send any local gradient information to the ranks that "own" the variables
290  auto local_obj_gradient = local_obj_grad_func(local_variables);
291  auto recv_data = op::utility::parallel::sendToOwners(info, local_obj_gradient, comm);
292  auto combine_data =
293  op::utility::remapRecvDataIncludeLocal(info.recv, recv_data, global_ids_to_local, local_obj_gradient);
294  std::vector<double> reduced_local_variables = op::utility::reduceRecvData(combine_data, local_reduce_func);
295  // At this point the data should be reduced but it's still in the local-data view
296  return op::utility::permuteMapAccessStore(reduced_local_variables, reduced_id_list, global_ids_to_local);
297  };
298 }
299 
/// Generate update method to propagate owned local variables back to local
/// variables in parallel
///
/// @param info rank-to-rank communication pattern (send/recv maps)
/// @param global_ids_to_local mapping from global variable ids to local indices
/// @param reduced_values values of the variables this rank owns
/// @return the rank-local view of the updated variable values
template <typename T, typename I, typename ValuesType>
ValuesType ReturnLocalUpdatedVariables(utility::RankCommunication<T>& info, I& global_ids_to_local,
                                       ValuesType& reduced_values)
{
  // send owned values back to the ranks that originally contributed them
  auto returned_data = op::utility::parallel::returnToSender(info, reduced_values);
  auto returned_remapped_data =
      op::utility::remapRecvDataIncludeLocal(info.send, returned_data, global_ids_to_local, reduced_values);
  ValuesType updated_local_variables;
  if (info.send.size() == 0) {
    // we own all the variables
    updated_local_variables = reduced_values;
  } else {
    // multiple ranks may have returned the same variable; keep the first
    // received value for each local id
    updated_local_variables = op::utility::reduceRecvData(
        returned_remapped_data,
        op::utility::reductions::firstOfCollection<typename decltype(returned_remapped_data)::mapped_type>);
  }
  return updated_local_variables;
}
330 
/// Register this rank's global variable ids and build the communication
/// pattern used by the parallel utilities
///
/// @param global_ids_on_rank global variable ids present on this rank (must be unique)
/// @param root rank used as the gather root
/// @param mpicomm MPI communicator
/// @return an op::utility::CommPattern bundling the send/recv info, the
///         reduced (owned) id list, and the original id list
template <typename T>
auto AdvancedRegistration(T& global_ids_on_rank, int root = 0, MPI_Comm mpicomm = MPI_COMM_WORLD)
{

  // check if global_ids_on_rank are unique
  std::set<typename T::value_type> label_set(global_ids_on_rank.begin(), global_ids_on_rank.end());
  assert(label_set.size() == global_ids_on_rank.size());

  // gather each rank's id count and the total global problem size
  auto [global_size, variables_per_rank] =
      op::utility::parallel::gatherVariablesPerRank<int>(global_ids_on_rank.size(), true, root, mpicomm);
  auto offsets = op::utility::buildInclusiveOffsets(variables_per_rank);
  // concatenate every rank's id list into one global array
  // (the `true` presumably requests gather-on-all-ranks — TODO confirm overload)
  auto all_global_ids_array = op::utility::parallel::concatGlobalVector(global_size, variables_per_rank,
                                                                        global_ids_on_rank, true, root, mpicomm);

  // invert the rank-local id list: global id -> local index
  auto global_local_map = op::utility::inverseMap(global_ids_on_rank);
  auto recv_send_info =
      op::utility::parallel::generateSendRecievePerRank(global_local_map, all_global_ids_array, offsets, mpicomm);
  // drop ids this rank sends away; what remains is owned by this rank
  auto reduced_dvs_on_rank = op::utility::filterOut(global_ids_on_rank, recv_send_info.send);

  return op::utility::CommPattern{recv_send_info, reduced_dvs_on_rank, global_ids_on_rank};
}
358 
359 } // namespace op
VectorType lowerBounds()
Get the lower bounds for each local optimization variable.
Definition: op.hpp:99
Go & onGo(const CallbackFn &go)
Define Go action.
Definition: op.hpp:36
Utility class for &quot;converting&quot; between Variables and something else.
Definition: op.hpp:58
virtual ~Optimizer()=default
Destructor.
double final_obj
final objective value
Definition: op.hpp:221
Functional(EvalObjectiveFn obj, EvalObjectiveGradFn grad, double lb=default_min, double ub=default_max)
Objective container class.
Definition: op.hpp:134
void Iteration()
What to do at the end of an optimization iteration.
Definition: op.hpp:200
Abstracted Optimization Vector container.
Definition: op.hpp:80
auto inverseMap(T &vector_map)
Inverts a vector that provides a map into an unordered_map.
Definition: op_utility.hpp:417
auto filterOut(const T &global_local_ids, std::unordered_map< int, std::vector< typename T::size_type >> &filter)
remove values in filter that correspond to global_local_ids
Definition: op_utility.hpp:561
Abstracted Optimizer implementation.
Definition: op.hpp:165
auto returnToSender(RankCommunication< T > &info, const V &local_data, MPI_Comm comm=MPI_COMM_WORLD)
transfer back data in reverse from sendToOwners
Definition: op_utility.hpp:244
V concatGlobalVector(typename V::size_type global_size, std::vector< int > &variables_per_rank, std::vector< int > &offsets, V &local_vector, bool gatherAll=true, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Assemble a vector by concatenation of local_vector across all ranks on a communicator.
Definition: op_utility.hpp:99
SensitivityType EvalGradient(const std::vector< double > &v)
return the objective gradient evaluation
Definition: op.hpp:151
op::Go go
Go function to start optimization.
Definition: op.hpp:209
Optimizer()
Ctor has deferred initialization.
Definition: op.hpp:168
CallbackFn save
callback for saving current optimizer state
Definition: op.hpp:218
std::function< void()> CallbackFn
Callback function type.
Definition: op.hpp:21
ValuesType ReturnLocalUpdatedVariables(utility::RankCommunication< T > &info, I &global_ids_to_local, ValuesType &reduced_values)
Generate update method to propagate owned local variables back to local variables in parallel...
Definition: op.hpp:313
virtual void setObjective(Functional &o)=0
Sets the optimization objective.
CallbackFn update
Update callback to compute before function calculations.
Definition: op.hpp:212
auto AdvancedRegistration(T &global_ids_on_rank, int root=0, MPI_Comm mpicomm=MPI_COMM_WORLD)
Definition: op.hpp:338
std::unique_ptr< OptType > PluginOptimizer(std::string optimizer_path, Args &&...args)
Dynamically load an Optimizer.
Definition: op.hpp:231
VectorType & data()
Get the underlying data.
Definition: op.hpp:96
ScatterFn scatter
Scatter data function.
Definition: op.hpp:114
auto OwnedLocalObjectiveGradientFunction(utility::RankCommunication< T > &info, I &global_ids_to_local, T &reduced_id_list, std::function< std::vector< double >(const std::vector< double > &)> local_obj_grad_func, std::function< double(const std::vector< double > &)> local_reduce_func, MPI_Comm comm=MPI_COMM_WORLD)
Generate an objective gradient function that takes local variables and reduces them in parallel to lo...
Definition: op.hpp:283
double upper_bound
Upper bounds for this optimization functional.
Definition: op.hpp:157
void Go()
Start the optimization.
Definition: op.hpp:191
auto remapRecvDataIncludeLocal(std::unordered_map< int, T > &recv, std::unordered_map< int, V > &recv_data, std::unordered_map< typename T::value_type, T > &global_to_local_map, V &local_variables)
rearrange data so that map[rank]-&gt;local_ids and map[rank] -&gt; V becomes map[local_ids]-&gt;values ...
Definition: op_utility.hpp:497
GatherFn gather
Gather data function.
Definition: op.hpp:111
T permuteMapAccessStore(T &vector, M &map, I &global_ids_of_local_vector)
Retrieves from T using a permuted mapping M and index mapping I stores in order, result[i] = T[I[M[i]...
Definition: op_utility.hpp:390
Go & onPreprocess(const CallbackFn &preprocess)
Define preprocess action.
Definition: op.hpp:29
VectorType upperBounds()
Get the upper bounds for each local optimization variable.
Definition: op.hpp:102
Complete Op communication pattern information.
Definition: op_utility.hpp:27
std::vector< T > buildInclusiveOffsets(std::vector< T > &values_per_rank)
Takes in sizes per index and and performs a rank-local inclusive offset.
Definition: op_utility.hpp:43
std::unordered_map< int, V > sendToOwners(RankCommunication< T > &info, V &local_data, MPI_Comm comm=MPI_COMM_WORLD)
transfer data to owners
Definition: op_utility.hpp:199
M::mapped_type reduceRecvData(M &remapped_data, std::function< typename M::mapped_type::value_type(const typename M::mapped_type &)> reduce_op)
apply reduction operation to received data
Definition: op_utility.hpp:519
void SaveState()
Saves the state of the optimizer.
Definition: op.hpp:203
Abstracted Objective Functional class.
Definition: op.hpp:118
virtual void addConstraint(Functional &)
Adds a constraint for the optimization problem.
Definition: op.hpp:186
auto ReduceObjectiveFunction(const std::function< V(T)> &local_func, MPI_Op op, MPI_Comm comm=MPI_COMM_WORLD)
Generate an objective function that performs a global reduction.
Definition: op.hpp:257
Holds communication information to and from rank.
Definition: op_utility.hpp:18
virtual double Solution()
What to do when the solution is found. Return the objective.
Definition: op.hpp:197
double lower_bound
Lower bounds for this optimization functional.
Definition: op.hpp:154
Definition: op.hpp:26
CallbackFn iterate
iterate callback to compute before
Definition: op.hpp:215
ResultType Eval(const std::vector< double > &v)
Return the objective evaluation.
Definition: op.hpp:144
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Allreduce(T &local, T &global, MPI_Op operation, MPI_Comm comm=MPI_COMM_WORLD)
All reduce a single element across all ranks in a communicator.
Definition: op_mpi.hpp:75
void UpdatedVariableCallback()
What to do when the variables are updated.
Definition: op.hpp:194
RankCommunication< T > generateSendRecievePerRank(M local_ids, T &all_global_local_ids, I &offsets, MPI_Comm comm=MPI_COMM_WORLD)
given a map of local_ids and global_ids determine send and recv communications
Definition: op_utility.hpp:136