File: SimulateStepRegressionCut.cpp

// Copyright (C) 2019 EDF
// All Rights Reserved
// This code is published under the GNU Lesser General Public License (GNU LGPL)
#include "StOpt/dp/SimulateStepRegressionCut.h"
#ifdef USE_MPI
#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include "StOpt/core/parallelism/all_gatherv.hpp"
#endif

using namespace std;
using namespace StOpt;
using namespace Eigen;


SimulateStepRegressionCut::SimulateStepRegressionCut(gs::BinaryFileArchive &p_ar, const int &p_iStep, const string &p_nameCont,
        const shared_ptr<SpaceGrid> &p_pGridFollowing, const shared_ptr<StOpt::OptimizerDPCutBase> &p_pOptimize
#ifdef USE_MPI
        , const boost::mpi::communicator &p_world
#endif
                                                    ):
    m_pGridFollowing(p_pGridFollowing),
    m_pOptimize(p_pOptimize)
#ifdef USE_MPI
    , m_world(p_world)
#endif
{
    // restore the continuation cuts computed during the optimization pass and stored
    // under the key p_nameCont + "Values" with the time step index as item name
    gs::Reference< vector< ContinuationCuts > >(p_ar, (p_nameCont + "Values").c_str(), boost::lexical_cast<string>(p_iStep).c_str()).restore(0, &m_continuationObj);
}

void SimulateStepRegressionCut::oneStep(vector<StateWithStocks> &p_statevector, ArrayXXd &p_phiInOut) const
{
#ifdef USE_MPI
    // split the Monte Carlo simulations between the MPI processes
    int rank = m_world.rank();
    int nbProc = m_world.size();
    int nsimPProc = (int)(p_statevector.size() / nbProc);
    int nRestSim = p_statevector.size() % nbProc;
    int iFirstSim = rank * nsimPProc + (rank < nRestSim ? rank : nRestSim);
    int iLastSim  = iFirstSim + nsimPProc + (rank < nRestSim ? 1 : 0);
    ArrayXXd stockPerSim(m_pGridFollowing->getDimension(), iLastSim - iFirstSim);
    ArrayXXd valueFunctionPerSim(p_phiInOut.rows(), iLastSim - iFirstSim);
    // treat the simulations assigned to this process (OpenMP threads inside each process)
    int is = 0;
#ifdef _OPENMP
    #pragma omp parallel for private(is)
#endif
    for (is = iFirstSim; is < iLastSim; ++is)
    {
        m_pOptimize->stepSimulate(m_pGridFollowing, m_continuationObj, p_statevector[is], p_phiInOut.col(is));
        // store the local results before gathering them on all processes
        stockPerSim.col(is - iFirstSim) = p_statevector[is].getPtStock();
        if (valueFunctionPerSim.size() > 0)
            valueFunctionPerSim.col(is - iFirstSim) = p_phiInOut.col(is);
    }
    // gather the stock positions and value functions of all simulations on every process
    ArrayXXd stockAllSim(m_pGridFollowing->getDimension(), p_statevector.size());
    boost::mpi::all_gatherv<double>(m_world, stockPerSim.data(), stockPerSim.size(), stockAllSim.data());
    boost::mpi::all_gatherv<double>(m_world, valueFunctionPerSim.data(), valueFunctionPerSim.size(), p_phiInOut.data());
    // update the states with the gathered stock positions
    for (size_t iis = 0; iis < p_statevector.size(); ++iis)
        p_statevector[iis].setPtStock(stockAllSim.col(iis));
#else
    // no MPI: treat all simulations, possibly in parallel with OpenMP
    int is = 0;
#ifdef _OPENMP
    #pragma omp parallel for private(is)
#endif
    for (is = 0; is < static_cast<int>(p_statevector.size()); ++is)
    {
        m_pOptimize->stepSimulate(m_pGridFollowing, m_continuationObj, p_statevector[is], p_phiInOut.col(is));
    }
#endif
}
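
For context, this class is meant to be driven step by step from an outer simulation loop that replays the cuts archive written by the optimization pass. Below is a minimal, non-MPI sketch of such a driver, not code from the package: the function name simulateCuts, the archive name "Continuation", the backward step indexing p_nbStep - 1 - istep and the single value-function row are illustrative assumptions and must match whatever conventions were used when the cuts were dumped.

// Hypothetical forward-simulation driver (non-MPI build); names marked as
// assumptions in the note above are illustrative, not part of the file shown.
#include <memory>
#include <string>
#include <vector>
#include <Eigen/Dense>
#include "geners/BinaryFileArchive.hh"
#include "StOpt/dp/SimulateStepRegressionCut.h"

// Replay the continuation cuts step by step and return the average gain
// over the Monte Carlo simulations (a single value-function row is assumed).
double simulateCuts(const std::shared_ptr<StOpt::SpaceGrid> &p_grid,
                    const std::shared_ptr<StOpt::OptimizerDPCutBase> &p_optimize,
                    std::vector<StOpt::StateWithStocks> &p_states,   // one state per simulation, initialized elsewhere
                    const std::string &p_fileToDump,                 // archive produced during optimization
                    const int p_nbStep)                              // number of time steps
{
    gs::BinaryFileArchive ar(p_fileToDump.c_str(), "r");
    Eigen::ArrayXXd phi = Eigen::ArrayXXd::Zero(1, p_states.size());
    for (int istep = 0; istep < p_nbStep; ++istep)
    {
        // the step index and the name "Continuation" must match the keys used at dump time
        StOpt::SimulateStepRegressionCut stepSim(ar, p_nbStep - 1 - istep, "Continuation", p_grid, p_optimize);
        stepSim.oneStep(p_states, phi);
    }
    return phi.row(0).mean();
}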