#include "FEDataStructures.h"
#include <mpi.h>
#include <iostream>
#include "FEAdaptor.h"
void SubCommunicatorDriver(int argc, char* argv[], MPI_Comm* handle)
{
  // Build a simple uniform grid with attribute fields on it.
  Grid grid;
  unsigned int numPoints[3] = { 70, 60, 44 };
  double spacing[3] = { 1, 1.1, 1.3 };
  grid.Initialize(numPoints, spacing);
  Attributes attributes;
  attributes.Initialize(&grid);

  // Initialize the co-processing adaptor on the sub-communicator
  // passed in through handle.
  FEAdaptor::Initialize(argc, argv, handle);
  unsigned int numberOfTimeSteps = 100;
  for (unsigned int timeStep = 0; timeStep < numberOfTimeSteps; timeStep++)
  {
    // Use a time step length of 0.1.
    double time = timeStep * 0.1;
    attributes.UpdateFields(time);
    // The last argument tells the adaptor whether this is the final time step.
    FEAdaptor::CoProcess(grid, attributes, time, timeStep, timeStep == numberOfTimeSteps - 1);
  }
  FEAdaptor::Finalize();
}
int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int myrank, numprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);

  // Create a sub-communicator containing the first half of the ranks.
  MPI_Group orig_group;
  MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
  std::vector<int> subranks;
  for (int i = 0; i < numprocs / 2; i++)
  {
    subranks.push_back(i);
  }
  // Make sure the sub-communicator is non-empty when running on a single process.
  if (subranks.empty())
  {
    subranks.push_back(0);
  }
  MPI_Group subgroup;
  MPI_Group_incl(orig_group, static_cast<int>(subranks.size()), subranks.data(), &subgroup);
  MPI_Comm subcommunicator;
  // Ranks outside subgroup get MPI_COMM_NULL for subcommunicator.
  MPI_Comm_create(MPI_COMM_WORLD, subgroup, &subcommunicator);
  if (myrank < static_cast<int>(subranks.size()))
  {
    // This rank belongs to the sub-communicator, so it takes part
    // in the co-processing.
    int newrank;
    MPI_Comm_rank(subcommunicator, &newrank);
    std::cout << "Process " << myrank << " is rank " << newrank
              << " on the sub-communicator.\n";
    SubCommunicatorDriver(argc, argv, &subcommunicator);
    std::cout << "Process " << myrank << " did some co-processing.\n";
  }
  else
  {
    std::cout << "Process " << myrank << " did not do any co-processing.\n";
  }
  // Release the MPI objects created above; MPI_COMM_NULL must not be freed.
  MPI_Group_free(&subgroup);
  MPI_Group_free(&orig_group);
  if (subcommunicator != MPI_COMM_NULL)
  {
    MPI_Comm_free(&subcommunicator);
  }
  MPI_Finalize();
  return 0;
}
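
// A note on the sub-communicator construction: the group-based sequence above
// (MPI_Comm_group / MPI_Group_incl / MPI_Comm_create) can also be written with
// a single MPI_Comm_split call. A minimal sketch, assuming the same myrank and
// numprocs variables as in main():
//
//   int half = numprocs / 2 > 0 ? numprocs / 2 : 1;
//   int color = myrank < half ? 0 : MPI_UNDEFINED;
//   MPI_Comm subcommunicator;
//   MPI_Comm_split(MPI_COMM_WORLD, color, myrank, &subcommunicator);
//
// Ranks passing MPI_UNDEFINED receive MPI_COMM_NULL, which matches what
// MPI_Comm_create returns for ranks outside the group.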