// Copyright (C) 2023 EDF
// All Rights Reserved
// This code is published under the GNU Lesser General Public License (GNU LGPL)
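
// Simulation counterpart of the multistage dynamic programming resolution:
// one SimulateStepMultiStageRegression object advances all simulated paths
// over one time step, reading back the regressed continuation values stored
// in the archive during the optimization part.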
#include "StOpt/dp/SimulateStepMultiStageRegression.h"
#ifdef USE_MPI
#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include "StOpt/core/parallelism/all_gatherv.hpp"
#endif
using namespace std;
using namespace StOpt;
using namespace Eigen;
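
// Constructor: p_ar is the archive written during optimization, p_iStep the
// index of the time step treated, p_nameCont / p_nameDetCont the base names
// under which the stochastic and deterministic continuation values were
// stored, p_pGridFollowing the grid at the following time step and
// p_pOptimize the optimizer describing the transition problem.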
SimulateStepMultiStageRegression::SimulateStepMultiStageRegression(std::shared_ptr<gs::BinaryFileArchive> &p_ar, const int &p_iStep,
        const string &p_nameCont, const std::string &p_nameDetCont,
        const shared_ptr<SpaceGrid> &p_pGridFollowing,
        const shared_ptr<StOpt::OptimizerMultiStageDPBase> &p_pOptimize
#ifdef USE_MPI
        , const boost::mpi::communicator &p_world
#endif
                                                                  ) :
    m_pGridFollowing(p_pGridFollowing), m_pOptimize(p_pOptimize), m_ar(p_ar),
    m_iStep(p_iStep), m_nameCont(p_nameCont), m_nameDetCont(p_nameDetCont)
#ifdef USE_MPI
    , m_world(p_world)
#endif
{}
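
// Advance all simulated paths over one time step. A time step may contain
// several deterministic intra-step periods: for each period the relevant
// continuation values are restored from the archive and every state in
// p_statevector is moved forward, the pathwise costs accumulating in p_phiInOut.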
void SimulateStepMultiStageRegression::oneStep(vector<StateWithStocks > &p_statevector, std::vector<ArrayXXd> &p_phiInOut) const
{
    shared_ptr<SimulatorMultiStageDPBase> simulator = m_pOptimize->getSimulator();
    int nbPeriodsOfCurrentStep = simulator->getNbPeriodsInTransition();
    for (int iPeriod = 0; iPeriod < nbPeriodsOfCurrentStep; iPeriod++)
    {
        // tell the simulator which intra-step period is treated
        simulator->setPeriodInTransition(iPeriod);
        // continuation values for the period and the grid they are defined on
        vector<GridAndRegressedValue> contVal;
        shared_ptr<SpaceGrid> gridFollLoc;
        if (iPeriod == (nbPeriodsOfCurrentStep - 1))
        {
            // last intra-step period: restore the continuation values stored
            // for this time step by the backward resolution
            gs::Reference<vector<GridAndRegressedValue> >(*m_ar, (m_nameCont + "Values").c_str(), boost::lexical_cast<string>(m_iStep).c_str()).restore(0, &contVal);
            gridFollLoc = m_pGridFollowing;
        }
        else
        {
            // intermediate period: restore the deterministic continuation
            // values stored for this period and reuse their grid
            gs::Reference<vector<GridAndRegressedValue> >(*m_ar, m_nameDetCont.c_str(), boost::lexical_cast<string>(iPeriod).c_str()).restore(0, &contVal);
            gridFollLoc = contVal[0].getGrid();
        }
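        // When compiled with MPI support, the Monte Carlo simulations are
        // split between the processes; each process advances its share and
        // the results are then gathered on all of them.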
#ifdef USE_MPI
        int rank = m_world.rank();
        int nbProc = m_world.size();
        // balanced split: each process treats nsimPProc simulations and the
        // first nRestSim ranks take one extra one
        int nsimPProc = static_cast<int>(p_statevector.size() / nbProc);
        int nRestSim = static_cast<int>(p_statevector.size() % nbProc);
        int iFirstSim = rank * nsimPProc + (rank < nRestSim ? rank : nRestSim);
        int iLastSim = iFirstSim + nsimPProc + (rank < nRestSim ? 1 : 0);
        // local storage for the stocks and value functions of the simulations
        // treated by this process
        ArrayXXd stockPerSim(p_statevector[0].getStockSize(), iLastSim - iFirstSim);
        ArrayXXd valueFunctionPerSim(p_phiInOut[0].rows(), iLastSim - iFirstSim);
        // spread the local calculations on the available threads
        int is = 0;
#ifdef _OPENMP
        #pragma omp parallel for private(is)
#endif
        for (is = iFirstSim; is < iLastSim; ++is)
        {
            m_pOptimize->stepSimulate(gridFollLoc, contVal, p_statevector[is], p_phiInOut[iPeriod].col(is));
            // keep the local results for the gather below
            stockPerSim.col(is - iFirstSim) = p_statevector[is].getPtStock();
            if (valueFunctionPerSim.size() > 0)
                valueFunctionPerSim.col(is - iFirstSim) = p_phiInOut[iPeriod].col(is);
        }
        // gather the stocks and value functions computed by all processes
        ArrayXXd stockAllSim(p_statevector[0].getStockSize(), p_statevector.size());
        boost::mpi::all_gatherv<double>(m_world, stockPerSim.data(), stockPerSim.size(), stockAllSim.data());
        boost::mpi::all_gatherv<double>(m_world, valueFunctionPerSim.data(), valueFunctionPerSim.size(), p_phiInOut[iPeriod].data());
        // update the stock component of every state with the gathered values
        for (size_t iis = 0; iis < p_statevector.size(); ++iis)
            p_statevector[iis].setPtStock(stockAllSim.col(iis));
#else
        // single-process case: treat all simulations, possibly multithreaded
        int is = 0;
#ifdef _OPENMP
        #pragma omp parallel for private(is)
#endif
        for (is = 0; is < static_cast<int>(p_statevector.size()); ++is)
        {
            m_pOptimize->stepSimulate(gridFollLoc, contVal, p_statevector[is], p_phiInOut[iPeriod].col(is));
        }
#endif
        // start the next period from the values accumulated so far, so that
        // the costs cumulate along the whole transition
        if (iPeriod < nbPeriodsOfCurrentStep - 1)
        {
            p_phiInOut[iPeriod + 1] = p_phiInOut[iPeriod];
        }
    }
}
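
// Minimal usage sketch (illustrative only: the archive name, continuation
// value base names, grid, optimizer and sizes below are hypothetical and must
// match what was used by the optimization part; a boost::mpi::communicator is
// also needed when built with USE_MPI):
//
//     auto ar = std::make_shared<gs::BinaryFileArchive>("OptDump", "r");
//     std::vector<StateWithStocks> states(nbSimul, initialState);
//     // one cost array per intra-step period, each of size (nbFunc, nbSimul)
//     std::vector<Eigen::ArrayXXd> phiInOut(nbPeriodMax, Eigen::ArrayXXd::Zero(nbFunc, nbSimul));
//     for (int iStep = 0; iStep < nbTimeStep; ++iStep)
//     {
//         SimulateStepMultiStageRegression stepSim(ar, nbTimeStep - 1 - iStep, "Continuation",
//                                                  "DetContinuation", grid, optimizer);
//         stepSim.oneStep(states, phiInOut);
//     }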