15 #include "op_utility.hpp"
31 preprocess_ = preprocess;
57 template <
class Variables,
class FieldType>
60 using ToTypeFn = std::function<FieldType(Variables&)>;
61 using FromTypeFn = std::function<Variables(FieldType&)>;
63 VariableMap(ToTypeFn to_fn, FromTypeFn from_fn) : toType_(to_fn), fromType_(from_fn) {}
65 FieldType convertFromVariable(Variables& v) {
return toType_(v); }
66 Variables convertToVariable(FieldType& f) {
return fromType(f); }
79 template <
class VectorType>
82 using ScatterFn = std::function<VectorType()>;
83 using GatherFn = std::function<VectorType()>;
84 using BoundsFn = std::function<VectorType()>;
87 : lowerBounds_(lowerBounds),
88 upperBounds_(upperBounds),
96 VectorType&
data() {
return data_; }
105 BoundsFn lowerBounds_;
106 BoundsFn upperBounds_;
120 using ResultType = double;
121 using SensitivityType = std::vector<double>;
122 using EvalObjectiveFn = std::function<ResultType(const std::vector<double>&)>;
123 using EvalObjectiveGradFn = std::function<SensitivityType(const std::vector<double>&)>;
125 static constexpr
double default_min = -std::numeric_limits<double>::max();
126 static constexpr
double default_max = std::numeric_limits<double>::max();
134 Functional(EvalObjectiveFn obj, EvalObjectiveGradFn grad,
double lb = default_min,
double ub = default_max)
144 ResultType
Eval(
const std::vector<double>& v) {
return obj_(v); }
151 SensitivityType
EvalGradient(
const std::vector<double>& v) {
return grad_(v); }
160 EvalObjectiveFn obj_;
161 EvalObjectiveGradFn grad_;
230 template <
class OptType,
typename... Args>
233 void* optimizer_plugin = dlopen(optimizer_path.c_str(), RTLD_LAZY);
235 if (!optimizer_plugin) {
236 std::cout << dlerror() << std::endl;
240 auto load_optimizer =
241 reinterpret_cast<std::unique_ptr<OptType> (*)(Args...)
>(dlsym(optimizer_plugin,
"load_optimizer"));
242 if (load_optimizer) {
243 return load_optimizer(std::forward<Args>(args)...);
256 template <
typename V,
typename T>
259 return [=](T variables) {
260 V local_sum = local_func(variables);
263 if (error != MPI_SUCCESS) {
264 std::cout <<
"MPI_Error" << __FILE__ << __LINE__ << std::endl;
282 template <
typename T,
typename I>
285 std::function<std::vector<double>(
const std::vector<double>&)> local_obj_grad_func,
286 std::function<
double(
const std::vector<double>&)> local_reduce_func, MPI_Comm comm = MPI_COMM_WORLD)
288 return [=, &info, &global_ids_to_local](
const std::vector<double>& local_variables) {
290 auto local_obj_gradient = local_obj_grad_func(local_variables);
312 template <
typename T,
typename I,
typename ValuesType>
314 ValuesType& reduced_values)
317 auto returned_remapped_data =
319 ValuesType updated_local_variables;
320 if (info.send.size() == 0) {
322 updated_local_variables = reduced_values;
325 returned_remapped_data,
326 op::utility::reductions::firstOfCollection<
typename decltype(returned_remapped_data)::mapped_type>);
328 return updated_local_variables;
337 template <
typename T>
342 std::set<typename T::value_type> label_set(global_ids_on_rank.begin(), global_ids_on_rank.end());
343 assert(label_set.size() == global_ids_on_rank.size());
345 auto [global_size, variables_per_rank] =
346 op::utility::parallel::gatherVariablesPerRank<int>(global_ids_on_rank.size(),
true, root, mpicomm);
349 global_ids_on_rank,
true, root, mpicomm);
352 auto recv_send_info =
VectorType lowerBounds()
Get the lower bounds for each local optimization variable.
Go & onGo(const CallbackFn &go)
Define Go action.
Utility class for "converting" between Variables and something else.
virtual ~Optimizer()=default
Destructor.
double final_obj
final objective value
Functional(EvalObjectiveFn obj, EvalObjectiveGradFn grad, double lb=default_min, double ub=default_max)
Objective container class.
void Iteration()
What to do at the end of an optimization iteration.
Abstracted Optimization Vector container.
auto inverseMap(T &vector_map)
Inverts a vector that provides a map into an unordered_map.
auto filterOut(const T &global_local_ids, std::unordered_map< int, std::vector< typename T::size_type >> &filter)
remove values in filter that correspond to global_local_ids
Abstracted Optimizer implementation.
auto returnToSender(RankCommunication< T > &info, const V &local_data, MPI_Comm comm=MPI_COMM_WORLD)
transfer back data in reverse from sendToOwners
V concatGlobalVector(typename V::size_type global_size, std::vector< int > &variables_per_rank, std::vector< int > &offsets, V &local_vector, bool gatherAll=true, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Assemble a vector by concatenation of local_vector across all ranks on a communicator.
SensitivityType EvalGradient(const std::vector< double > &v)
return the objective gradient evaluation
op::Go go
Go function to start optimization.
Optimizer()
Ctor has deferred initialization.
CallbackFn save
callback for saving current optimizer state
std::function< void()> CallbackFn
Callback function type.
ValuesType ReturnLocalUpdatedVariables(utility::RankCommunication< T > &info, I &global_ids_to_local, ValuesType &reduced_values)
Generate update method to propagate owned local variables back to local variables in parallel...
virtual void setObjective(Functional &o)=0
Sets the optimization objective.
CallbackFn update
Update callback to compute before function calculations.
auto AdvancedRegistration(T &global_ids_on_rank, int root=0, MPI_Comm mpicomm=MPI_COMM_WORLD)
std::unique_ptr< OptType > PluginOptimizer(std::string optimizer_path, Args &&...args)
Dynamically load an Optimizer.
VectorType & data()
Get the underlying data.
ScatterFn scatter
Scatter data function.
auto OwnedLocalObjectiveGradientFunction(utility::RankCommunication< T > &info, I &global_ids_to_local, T &reduced_id_list, std::function< std::vector< double >(const std::vector< double > &)> local_obj_grad_func, std::function< double(const std::vector< double > &)> local_reduce_func, MPI_Comm comm=MPI_COMM_WORLD)
Generate an objective gradient function that takes local variables and reduces them in parallel to lo...
double upper_bound
Upper bounds for this optimization functional.
void Go()
Start the optimization.
auto remapRecvDataIncludeLocal(std::unordered_map< int, T > &recv, std::unordered_map< int, V > &recv_data, std::unordered_map< typename T::value_type, T > &global_to_local_map, V &local_variables)
rearrange data so that map[rank]->local_ids and map[rank] -> V becomes map[local_ids]->values ...
GatherFn gather
Gather data function.
T permuteMapAccessStore(T &vector, M &map, I &global_ids_of_local_vector)
Retrieves from T using a permuted mapping M and index mapping I stores in order, result[i] = T[I[M[i]...
Go & onPreprocess(const CallbackFn &preprocess)
Define preprocess action.
VectorType upperBounds()
Get the upper bounds for each local optimization variable.
Complete Op communication pattern information.
std::vector< T > buildInclusiveOffsets(std::vector< T > &values_per_rank)
Takes in sizes per index and performs a rank-local inclusive offset.
std::unordered_map< int, V > sendToOwners(RankCommunication< T > &info, V &local_data, MPI_Comm comm=MPI_COMM_WORLD)
transfer data to owners
M::mapped_type reduceRecvData(M &remapped_data, std::function< typename M::mapped_type::value_type(const typename M::mapped_type &)> reduce_op)
apply reduction operation to received data
void SaveState()
Saves the state of the optimizer.
Abstracted Objective Functional class.
virtual void addConstraint(Functional &)
Adds a constraint for the optimization problem.
auto ReduceObjectiveFunction(const std::function< V(T)> &local_func, MPI_Op op, MPI_Comm comm=MPI_COMM_WORLD)
Generate an objective function that performs a global reduction.
Holds communication information to and from rank.
virtual double Solution()
What to do when the solution is found. Return the objective.
double lower_bound
Lower bounds for this optimization functional.
CallbackFn iterate
iterate callback to compute before
ResultType Eval(const std::vector< double > &v)
Return the objective evaluation.
std::enable_if_t<!(detail::has_data< T >::value &&detail::has_size< T >::value), int > Allreduce(T &local, T &global, MPI_Op operation, MPI_Comm comm=MPI_COMM_WORLD)
All reduce a single element across all ranks in a communicator.
void UpdatedVariableCallback()
What to do when the variables are updated.
RankCommunication< T > generateSendRecievePerRank(M local_ids, T &all_global_local_ids, I &offsets, MPI_Comm comm=MPI_COMM_WORLD)
given a map of local_ids and global_ids determine send and recv communications