// Copyright (C) 2016 EDF
// All Rights Reserved
// This code is published under the GNU Lesser General Public License (GNU LGPL)
#ifdef USE_MPI
#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include "StOpt/core/parallelism/all_gatherv.hpp"
#endif
#include "StOpt/dp/SimulateStepRegressionControl.h"
using namespace std;
using namespace StOpt;
using namespace Eigen;
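// Constructor: reload from the binary archive the optimal controls (one GridAndRegressedValue
// per control) stored under the name (p_nameCont + "Control") for time step p_iStep.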
SimulateStepRegressionControl::SimulateStepRegressionControl(gs::BinaryFileArchive &p_ar, const int &p_iStep, const string &p_nameCont,
        const shared_ptr<SpaceGrid> &p_pGridFollowing,
        const shared_ptr<StOpt::OptimizerBaseInterp > &p_pOptimize
#ifdef USE_MPI
        , const boost::mpi::communicator &p_world
#endif
                                                            ):
    m_pGridFollowing(p_pGridFollowing),
    m_pOptimize(p_pOptimize)
#ifdef USE_MPI
    , m_world(p_world)
#endif
{
    gs::Reference< vector<GridAndRegressedValue> >(p_ar, (p_nameCont + "Control").c_str(), boost::lexical_cast<string>(p_iStep).c_str()).restore(0, &m_control);
}
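
// Simulate one time step: for every Monte Carlo state, interpolate the stored optimal
// controls on the grid and let the optimizer update the state, its regime and the
// running costs in p_phiInOut. With MPI the simulations are split between processes
// and the results are gathered back so that every process ends up with all of them.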
void SimulateStepRegressionControl::oneStep(vector<StateWithStocks > &p_statevector, ArrayXXd &p_phiInOut) const
{
#ifdef USE_MPI
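    // parallel version: each MPI process advances only a block of the simulations
    // and stores its local results for a later gather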
    int rank = m_world.rank();
    int nbProc = m_world.size();
    // balanced split of the simulations between the nbProc MPI processes
    int nsimPProc = (int)(p_statevector.size() / nbProc);
    int nRestSim = p_statevector.size() % nbProc;
    int iFirstSim = rank * nsimPProc + (rank < nRestSim ? rank : nRestSim);
    int iLastSim = iFirstSim + nsimPProc + (rank < nRestSim ? 1 : 0);
    ArrayXXd stockPerSim(m_pGridFollowing->getDimension(), iLastSim - iFirstSim);
    // now store regimes
    ArrayXi regimePerSim(iLastSim - iFirstSim);
    ArrayXXd valueFunctionPerSim(p_phiInOut.rows(), iLastSim - iFirstSim);
    // spread calculations on processors
    int is = 0;
#ifdef _OPENMP
    #pragma omp parallel for private(is)
#endif
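    // simulate the slice [iFirstSim, iLastSim) owned by this process (OpenMP may thread the loop)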
    for (is = iFirstSim; is < iLastSim; ++is)
    {
        m_pOptimize->stepSimulateControl(m_pGridFollowing, m_control, p_statevector[is], p_phiInOut.col(is));
        // store for broadcast
        stockPerSim.col(is - iFirstSim) = p_statevector[is].getPtStock();
        regimePerSim(is - iFirstSim) = p_statevector[is].getRegime();
        if (valueFunctionPerSim.size() > 0)
            valueFunctionPerSim.col(is - iFirstSim) = p_phiInOut.col(is);
    }
    // gather the local results so that every process gets the stocks, regimes and values of all simulations
    ArrayXXd stockAllSim(m_pGridFollowing->getDimension(), p_statevector.size());
    boost::mpi::all_gatherv<double>(m_world, stockPerSim.data(), stockPerSim.size(), stockAllSim.data());
    ArrayXi regimeAllSim(p_statevector.size());
    boost::mpi::all_gatherv<int>(m_world, regimePerSim.data(), regimePerSim.size(), regimeAllSim.data());
    boost::mpi::all_gatherv<double>(m_world, valueFunctionPerSim.data(), valueFunctionPerSim.size(), p_phiInOut.data());
    // update results
    for (size_t iis = 0; iis < p_statevector.size(); ++iis)
    {
        p_statevector[iis].setPtStock(stockAllSim.col(iis));
        p_statevector[iis].setRegime(regimeAllSim(iis));
    }
#else
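    // sequential version (possibly threaded with OpenMP): all simulations are handled here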
    int is = 0;
#ifdef _OPENMP
    #pragma omp parallel for private(is)
#endif
    for (is = 0; is < static_cast<int>(p_statevector.size()); ++is)
    {
        m_pOptimize->stepSimulateControl(m_pGridFollowing, m_control, p_statevector[is], p_phiInOut.col(is));
    }
#endif
}
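
// ---------------------------------------------------------------------------
// Minimal usage sketch (kept as a comment, not part of the library). It assumes
// an archive written during the optimization part under the base name "archive"
// with the controls stored under the name "Continuation", plus a grid, an
// optimizer, initial states and a cost array supplied by the caller; all of
// these names are illustrative. When compiled with USE_MPI the constructor
// additionally takes a boost::mpi::communicator.
//
//     gs::BinaryFileArchive ar("archive", "r");
//     for (int istep = 0; istep < nbStep; ++istep)
//     {
//         // the optimization part runs backward in time, hence the reversed index
//         SimulateStepRegressionControl stepSim(ar, nbStep - 1 - istep, "Continuation",
//                                               grid, optimizer);
//         stepSim.oneStep(states, costs);
//         // the stochastic part of the states would then be advanced by the simulator
//     }
// ---------------------------------------------------------------------------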