// Copyright (C) 2016 EDF
// All Rights Reserved
// This code is published under the GNU Lesser General Public License (GNU LGPL)
#ifdef USE_MPI
#include <functional>
#include <memory>
#include <boost/mpi.hpp>
#include <Eigen/Dense>
#include "StOpt/core/parallelism/ParallelComputeGridSplitting.h"
#include "StOpt/core/utils/primeNumber.h"
#include "StOpt/dp/OptimizerBase.h"
#include "StOpt/dp/TransitionStepBaseDist.h"
#include "StOpt/core/parallelism/GridReach.h"
using namespace StOpt;
using namespace Eigen;
using namespace std;
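// Constructor : splits the current and previous grids between the MPI processes
// and builds the sub-grid treated by the current processor.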
TransitionStepBaseDist::TransitionStepBaseDist(const shared_ptr<FullGrid> &p_pGridCurrent,
        const shared_ptr<FullGrid> &p_pGridPrevious,
        const shared_ptr<OptimizerBase> &p_pOptimize,
        const boost::mpi::communicator &p_world):
    m_pGridCurrent(p_pGridCurrent), m_pGridPrevious(p_pGridPrevious), m_pOptimize(p_pOptimize), m_world(p_world)
{
    // dimensions of the current and previous grids
    ArrayXi initialDimension = p_pGridCurrent->getDimensions();
    ArrayXi initialDimensionPrev = p_pGridPrevious->getDimensions();
    // organize the hypercube splitting between the MPI processes
    ArrayXi splittingRatio = paraOptimalSplitting(initialDimension, m_pOptimize->getDimensionToSplit(), p_world);
    ArrayXi splittingRatioPrev = paraOptimalSplitting(initialDimensionPrev, m_pOptimize->getDimensionToSplit(), p_world);
    // cone function : for a sub-mesh of the current grid, gives the sub-mesh of the previous grid it reaches
    function< SubMeshIntCoord(const SubMeshIntCoord &) > fMesh = GridReach<OptimizerBase>(p_pGridCurrent, p_pGridPrevious, p_pOptimize);
    // ParallelComputeGridSplitting object handling the splitting and the data exchanges
    m_paral = make_shared<ParallelComputeGridSplitting>(initialDimension, initialDimensionPrev, fMesh, splittingRatio, splittingRatioPrev, p_world);
    // get back the part of the grid treated by the current processor
    Array< array<int, 2>, Dynamic, 1 > gridLocal = m_paral->getCurrentCalculationGrid();
    // construct the local sub-grid
    m_gridCurrentProc = m_pGridCurrent->getSubGrid(gridLocal);
    // only if the local grid is not empty
    if (m_gridCurrentProc->getNbPoints() > 0)
    {
        // get back the grid extended on the previous time step
        Array< array<int, 2>, Dynamic, 1 > gridLocalExtended = m_paral->getExtendedGridProcOldGrid();
        m_gridExtendPreviousStep = m_pGridPrevious->getSubGrid(gridLocalExtended);
    }
}
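// Gather on processor 0 the value functions distributed between the processors,
// reconstructed on the whole current grid.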
void TransitionStepBaseDist::reconstructOnProc0(const vector< shared_ptr< Eigen::ArrayXXd > > &p_phiIn, vector< shared_ptr< Eigen::ArrayXXd > > &p_phiOut)
{
    p_phiOut.resize(p_phiIn.size());
    // describe the whole current grid : bounds [0, dimension) in each direction
    ArrayXi initialDimension = m_pGridCurrent->getDimensions();
    Array< array<int, 2>, Dynamic, 1 > gridOnProc0(initialDimension.size());
    for (int id = 0; id < initialDimension.size(); ++id)
    {
        gridOnProc0(id)[0] = 0;
        gridOnProc0(id)[1] = initialDimension(id);
    }
    for (size_t i = 0; i < p_phiIn.size(); ++i)
    {
        // only the processors effectively used by the splitting take part in the reconstruction
        if (m_world.rank() < m_paral->getNbProcessorUsed())
            p_phiOut[i] = make_shared<Eigen::ArrayXXd>(m_paral->reconstruct(*p_phiIn[i], gridOnProc0));
    }
}
#endif