// Copyright (C) 2019 EDF
// All Rights Reserved
// This code is published under the GNU Lesser General Public License (GNU LGPL)
#include <memory>
#include <boost/lexical_cast.hpp>
#include "geners/vectorIO.hh"
#include "geners/Record.hh"
#ifdef USE_MPI
#include "boost/mpi.hpp"
#include "StOpt/core/parallelism/all_gatherv.hpp"
#endif
#ifdef _OPENMP
#include <omp.h>
#include "StOpt/core/utils/OpenmpException.h"
#endif
#include "StOpt/dp/TransitionStepRegressionDPCut.h"
#include "StOpt/core/grids/GridIterator.h"
#include "StOpt/regression/ContinuationCuts.h"
#include "StOpt/regression/ContinuationCutsGeners.h"
#include "StOpt/regression/GridAndRegressedValue.h"
#include "StOpt/regression/GridAndRegressedValueGeners.h"
using namespace Eigen;
using namespace StOpt;
using namespace std;
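
// Constructor: keeps the full grids describing the stock discretization at the
// current and previous time steps and the optimizer defining the local problems
// to solve at each grid point (and, with USE_MPI, the communicator used to
// share the grid points between processes).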
TransitionStepRegressionDPCut::TransitionStepRegressionDPCut(const shared_ptr<FullGrid> &p_pGridCurrent,
        const shared_ptr<FullGrid> &p_pGridPrevious,
        const shared_ptr<OptimizerDPCutBase> &p_pOptimize
#ifdef USE_MPI
        , const boost::mpi::communicator &p_world
#endif
                                                            ) :
    m_pGridCurrent(p_pGridCurrent), m_pGridPrevious(p_pGridPrevious), m_pOptimize(p_pOptimize)
#ifdef USE_MPI
    , m_world(p_world)
#endif
{
}
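
// One time step of the backward dynamic programming resolution with cuts:
// from the cuts p_phiIn obtained at the previously treated time step, build the
// continuation cuts by conditional expectation (p_condExp), then optimize every
// point of the current grid, sharing the points between the MPI processes and
// the OpenMP threads. For each regime the result is an array with
// nbSimul * (stock dimension + 1) rows (per simulation: the cut constant and
// one sensitivity per stock dimension) and one column per grid point.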
vector< shared_ptr< ArrayXXd > > TransitionStepRegressionDPCut::oneStep(const vector< shared_ptr< ArrayXXd > > &p_phiIn,
        const shared_ptr< BaseRegression > &p_condExp) const
{
    // number of regimes at current time
    int nbRegimes = m_pOptimize->getNbRegime();
    vector< shared_ptr< ArrayXXd > > phiOut(nbRegimes);
    // only if the processor is working
    if (m_pGridCurrent->getNbPoints() > 0)
    {
#ifdef USE_MPI
        int rank = m_world.rank();
        int nbProc = m_world.size();
        // split the points of the current grid evenly between the MPI processes;
        // the first nRestPointCur ranks each treat one extra point
        int nbPointsCur = m_pGridCurrent->getNbPoints();
        int npointPProcCur = (int)(nbPointsCur / nbProc);
        int nRestPointCur = nbPointsCur % nbProc;
        int iFirstPointCur = rank * npointPProcCur + (rank < nRestPointCur ? rank : nRestPointCur);
        int iLastPointCur = iFirstPointCur + npointPProcCur + (rank < nRestPointCur ? 1 : 0);
        // local cuts and the local-to-global mapping of the point indices treated by this process
        std::vector< ArrayXXd > phiOutLoc(nbRegimes);
        ArrayXi ilocToGlobal(iLastPointCur - iFirstPointCur);
        for (int iReg = 0; iReg < nbRegimes; ++iReg)
            phiOutLoc[iReg].resize(p_condExp->getNbSimul() * (m_pGridCurrent->getDimension() + 1), iLastPointCur - iFirstPointCur);
#endif
        // allocate for the global solution
        for (int iReg = 0; iReg < nbRegimes; ++iReg)
            phiOut[iReg] = make_shared< ArrayXXd >(p_condExp->getNbSimul() * (m_pGridCurrent->getDimension() + 1), m_pGridCurrent->getNbPoints());
        // number of threads
#ifdef _OPENMP
        int nbThreads = omp_get_max_threads();
#else
        int nbThreads = 1;
#endif
        // create the continuation cuts by regressing the cuts of the previous time step
        vector< ContinuationCuts > contVal(p_phiIn.size());
        for (size_t iReg = 0; iReg < p_phiIn.size(); ++iReg)
            contVal[iReg] = ContinuationCuts(m_pGridPrevious, p_condExp, *p_phiIn[iReg]);
        // iterate on the points of the current grid treated by this processor
        int iThread = 0;
#ifdef _OPENMP
        OpenmpException excep; // deal with exceptions thrown inside the OpenMP region
        #pragma omp parallel for private(iThread)
#endif
        for (iThread = 0; iThread < nbThreads; ++iThread)
        {
#ifdef _OPENMP
            excep.run([&]
            {
#endif
                shared_ptr< GridIterator > iterGridPoint = m_pGridCurrent->getGridIterator();
                // position the iterator according to the MPI rank and the thread number
#ifdef USE_MPI
                iterGridPoint->jumpToAndInc(rank, nbProc, iThread);
#else
                iterGridPoint->jumpToAndInc(0, 1, iThread);
#endif
                // iterate on the points of the grid
                while (iterGridPoint->isValid())
                {
                    ArrayXd pointCoord = iterGridPoint->getCoordinate();
                    // optimize the current point for all regimes -> get back the cuts per simulation and stock point
                    ArrayXXd solution = m_pOptimize->stepOptimize(m_pGridPrevious, pointCoord, contVal);
#ifdef USE_MPI
                    // copy the solution and record the global index of the local point
                    int iposArray = iterGridPoint->getRelativePosition();
                    ilocToGlobal(iposArray) = iterGridPoint->getCount();
                    for (int iReg = 0; iReg < nbRegimes; ++iReg)
                        phiOutLoc[iReg].col(iposArray) = solution.col(iReg);
#else
                    // copy the solution
                    for (int iReg = 0; iReg < nbRegimes; ++iReg)
                        (*phiOut[iReg]).col(iterGridPoint->getCount()) = solution.col(iReg);
#endif
                    iterGridPoint->nextInc(nbThreads);
                }
#ifdef _OPENMP
            });
#endif
        }
#ifdef _OPENMP
        excep.rethrow(); // rethrow any exception caught in the parallel region
#endif
#ifdef USE_MPI
        // gather the local-to-global mappings and the local cuts from all processes,
        // then dispatch each gathered column to its global position
        ArrayXi ilocToGlobalGlob(nbPointsCur);
        boost::mpi::all_gatherv<int>(m_world, ilocToGlobal.data(), ilocToGlobal.size(), ilocToGlobalGlob.data());
        ArrayXXd storeGlob(p_condExp->getNbSimul() * (m_pGridCurrent->getDimension() + 1), nbPointsCur);
        for (int iReg = 0; iReg < nbRegimes; ++iReg)
        {
            boost::mpi::all_gatherv<double>(m_world, phiOutLoc[iReg].data(), phiOutLoc[iReg].size(), storeGlob.data());
            for (int ipos = 0; ipos < ilocToGlobalGlob.size(); ++ipos)
                (*phiOut[iReg]).col(ilocToGlobalGlob(ipos)) = storeGlob.col(ipos);
        }
#endif
    }
    return phiOut;
}
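
// Minimal usage sketch (illustrative only: the grid, optimizer, regressor and
// archive objects below are assumptions, not part of this file):
//
//     TransitionStepRegressionDPCut transStep(gridCurrent, gridPrevious, optimizer);
//     vector< shared_ptr< ArrayXXd > > phiCur = transStep.oneStep(phiPrevious, regressor);
//     transStep.dumpContinuationCutsValues(archive, "Continuation", iStep, phiPrevious, regressor);

// Dump the continuation cuts (regressed on the grid of the previous time step)
// into the geners binary archive under the name p_name + "Values", keyed by the
// time step number; with USE_MPI only the master process writes.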
void TransitionStepRegressionDPCut::dumpContinuationCutsValues(std::shared_ptr<gs::BinaryFileArchive> p_ar, const string &p_name, const int &p_iStep,
        const vector< shared_ptr< ArrayXXd > > &p_phiIn, const shared_ptr<BaseRegression> &p_condExp) const
{
#ifdef USE_MPI
    if (m_world.rank() == 0)
    {
#endif
        vector< ContinuationCuts > contVal(p_phiIn.size());
        for (size_t iReg = 0; iReg < p_phiIn.size(); ++iReg)
            contVal[iReg] = ContinuationCuts(m_pGridPrevious, p_condExp, *p_phiIn[iReg]);
        string stepString = boost::lexical_cast<string>(p_iStep);
        *p_ar << gs::Record(contVal, (p_name + "Values").c_str(), stepString.c_str());
        p_ar->flush(); // necessary for the python mapping
#ifdef USE_MPI
    }
#endif
}
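
// Dump the Bellman cuts: same archive layout as dumpContinuationCutsValues, but
// the cuts are regressed on the current grid since they represent the Bellman
// values at the current date.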
void TransitionStepRegressionDPCut::dumpBellmanCutsValues(std::shared_ptr<gs::BinaryFileArchive> p_ar, const string &p_name, const int &p_iStep,
        const vector< shared_ptr< ArrayXXd > > &p_phiIn, const shared_ptr<BaseRegression> &p_condExp) const
{
#ifdef USE_MPI
    if (m_world.rank() == 0)
    {
#endif
        vector< ContinuationCuts > contVal(p_phiIn.size());
        for (size_t iReg = 0; iReg < p_phiIn.size(); ++iReg)
            contVal[iReg] = ContinuationCuts(m_pGridCurrent, p_condExp, *p_phiIn[iReg]);
        string stepString = boost::lexical_cast<string>(p_iStep);
        *p_ar << gs::Record(contVal, (p_name + "Values").c_str(), stepString.c_str());
        p_ar->flush(); // necessary for the python mapping
#ifdef USE_MPI
    }
#endif
}