File: FinalStepZeroDist.h

// Copyright (C) 2016 EDF
// All Rights Reserved
// This code is published under the GNU Lesser General Public License (GNU LGPL)
#ifndef FINALSTEPZERODIST_H
#define FINALSTEPZERODIST_H
#include <functional>
#include <memory>
#include <vector>
#include <boost/mpi.hpp>
#include <Eigen/Dense>
#include "StOpt/core/parallelism/ParallelComputeGridSplitting.h"


/** \file FinalStepZeroDist.h
 *  \brief sets the value at the final time step to 0
 * \author  Xavier Warin
 */
namespace StOpt
{

/// \class FinalStepZeroDist FinalStepZeroDist.h
///   Last time step in dynamic programming: the final value is set to 0
///   Each regime has a grid with the same dimension
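///
///   A minimal usage sketch (illustrative only): the RegularSpaceGrid construction
///   below is an assumption made for the example and is not prescribed by this header.
///   \code
///   boost::mpi::communicator world;
///   // hypothetical one-dimensional regular grid shared by two regimes
///   Eigen::ArrayXd lowValues = Eigen::ArrayXd::Constant(1, 0.);
///   Eigen::ArrayXd step      = Eigen::ArrayXd::Constant(1, 0.1);
///   Eigen::ArrayXi nbStep    = Eigen::ArrayXi::Constant(1, 10);
///   auto oneGrid = std::make_shared<StOpt::RegularSpaceGrid>(lowValues, step, nbStep);
///   std::vector<std::shared_ptr<StOpt::RegularSpaceGrid> > grids(2, oneGrid);
///   // allow the single dimension to be split between processors, for each regime
///   std::vector<Eigen::Array<bool, Eigen::Dynamic, 1> > dimToSplit(2,
///       Eigen::Array<bool, Eigen::Dynamic, 1>::Constant(1, true));
///   StOpt::FinalStepZeroDist<StOpt::RegularSpaceGrid> finalStep(grids, dimToSplit, world);
///   // one (nbSimul x nb local grid points) array of zeros per regime
///   std::vector<std::shared_ptr<Eigen::ArrayXXd> > values = finalStep(100);
///   \endcode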
template< class grid>
class FinalStepZeroDist
{

private :

    std::vector<std::shared_ptr< grid> >  m_pGridCurrent;    ///< grids at the final time step (one per regime)
    std::vector<std::shared_ptr< grid> >  m_gridCurrentProc; ///< local grids treated by the current processor
    int m_nbRegime;                                          ///< number of regimes

public:

    /// \brief Constructor
    /// \param p_pGridCurrent   grids describing the whole problem for each regime
    /// \param p_bdimToSplit    dimensions to split for parallelism, for each regime
    /// \param p_world          MPI communicator
    FinalStepZeroDist(const  std::vector< std::shared_ptr< grid > >   &p_pGridCurrent,
                      const  std::vector< Eigen::Array< bool, Eigen::Dynamic, 1> >   &p_bdimToSplit,
                      const boost::mpi::communicator &p_world):
        m_pGridCurrent(p_pGridCurrent),
        m_gridCurrentProc(p_pGridCurrent.size()),
        m_nbRegime(p_pGridCurrent.size())
    {
        for (int iReg = 0;  iReg < m_nbRegime; ++iReg)
        {

            // initial dimension
            Eigen::ArrayXi initialDimension   = p_pGridCurrent[iReg]->getDimensions();
            // organize the hypercube splitting for parallel
            Eigen::ArrayXi splittingRatio = paraOptimalSplitting(initialDimension, p_bdimToSplit[iReg], p_world);
            // grid treated by current processor
            m_gridCurrentProc[iReg] = m_pGridCurrent[iReg]->getSubGrid(paraSplitComputationGridsProc(initialDimension, splittingRatio, p_world.rank()));
        }
    }
    /// \brief Fill, for each regime, an array of zeros on the part of the grid owned by the processor
    /// \param p_nbSimul   number of simulations (particles)
    std::vector< std::shared_ptr< Eigen::ArrayXXd > > operator()(const int &p_nbSimul) const
    {
        std::vector<std::shared_ptr< Eigen::ArrayXXd > > finalValues(m_nbRegime);
        for (int iReg = 0; iReg < m_nbRegime; ++iReg)
        {
            if (m_gridCurrentProc[iReg]->getNbPoints() > 0)
            {
                // allocate a (p_nbSimul, number of local grid points) array filled with zeros
                finalValues[iReg] = std::make_shared<Eigen::ArrayXXd>(Eigen::ArrayXXd::Zero(p_nbSimul, m_gridCurrentProc[iReg]->getNbPoints()));
            }
            else
            {
                // this processor owns no point of the grid for this regime: return an empty array
                finalValues[iReg] = std::make_shared<Eigen::ArrayXXd>();
            }
        }
        return finalValues;
    }

    /// \brief get back the local grid, for each regime, associated with the current time step
    inline std::vector< std::shared_ptr<grid> >    getGridCurrentProc()const
    {
        return m_gridCurrentProc ;
    }
};
}
#endif