File: dofManager.cpp

Package: gmsh 4.7.1+ds1-5 (Debian bullseye, area: main)
// Gmsh - Copyright (C) 1997-2020 C. Geuzaine, J.-F. Remacle
//
// See the LICENSE.txt file for license information. Please report all
// issues on https://gitlab.onelab.info/gmsh/gmsh/issues.

#include "GmshConfig.h"

#ifdef HAVE_MPI
#include "mpi.h"
#endif

#include "dofManager.h"

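// scatterSolution() pushes the values of locally owned ("parent") dofs to
// the ranks that keep ghost copies of them: each rank posts a non-blocking
// receive towards every rank it ghosts dofs from, sends the values of its
// own dofs that other ranks ghost, and stores the incoming values in
// ghostValue, keyed by the ghosted Dof.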
template <> void dofManager<double>::scatterSolution()
{
#ifdef HAVE_MPI
  if(!_parallelFinalized) {
    _parallelFinalize();
  }
  MPI_Status status;
  std::vector<std::vector<double> > sendBuf(Msg::GetCommSize()),
    recvBuf(Msg::GetCommSize());
  std::vector<MPI_Request> reqRecv(Msg::GetCommSize()),
    reqSend(Msg::GetCommSize());
  for(int i = 0; i < Msg::GetCommSize(); i++) {
    reqRecv[i] = reqSend[i] = MPI_REQUEST_NULL;
    if(!ghostByProc[i].empty()) {
      recvBuf[i].resize(ghostByProc[i].size());
      MPI_Irecv(&recvBuf[i][0], recvBuf[i].size(), MPI_DOUBLE, i, 0,
                MPI_COMM_WORLD, &reqRecv[i]);
    }
    if(!parentByProc[i].empty()) {
      getDofValue(parentByProc[i], sendBuf[i]);
      MPI_Isend(&sendBuf[i][0], sendBuf[i].size(), MPI_DOUBLE, i, 0,
                MPI_COMM_WORLD, &reqSend[i]);
    }
  }
  int index;
  while(MPI_Waitany(Msg::GetCommSize(), &reqRecv[0], &index, &status) ==
          MPI_SUCCESS &&
        index != MPI_UNDEFINED) {
    if(status.MPI_TAG == 0)
      for(std::size_t j = 0; j < recvBuf[index].size(); j++)
        ghostValue[ghostByProc[index][j]] = recvBuf[index][j];
  }
  MPI_Waitall(Msg::GetCommSize(), &reqSend[0], MPI_STATUSES_IGNORE);
#endif
}
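
// A minimal usage sketch (illustrative assumption, not part of this
// file): once each rank has assembled and solved its share of the
// system, the owned values are propagated to the ghost copies before any
// post-processing that reads ghost dofs:
//
//   dofManager<double> assembler(&lsys); // lsys: a linearSystem<double>
//   // ... numberDof()/assemble() on locally owned dofs ...
//   lsys.systemSolve();                  // each rank solves its block
//   assembler.scatterSolution();         // ghost dofs now see parent values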

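// _parallelFinalize() turns the per-rank dof numbering into a global one
// and resolves the global numbers of ghost dofs, in three steps:
//  (1) a sequential prefix scan over the ranks gives each rank its start
//      offset numStart, and the last rank broadcasts the grand total;
//  (2) each rank sends the (entity, type) keys of the dofs it ghosts to
//      the owning ranks (tag 0) and receives the owners' global numbers
//      back (tag 1);
//  (3) the received numbers are patched into the local unknown map.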
void dofManagerBase::_parallelFinalize()
{
  _localSize = unknown.size();
#ifdef HAVE_MPI
  int numStart;
  int numTotal;
  MPI_Status status;
  parentByProc.resize(Msg::GetCommSize());
  ghostByProc.resize(Msg::GetCommSize());
  if(Msg::GetCommRank() == 0) {
    numStart = 0;
  }
  else
    MPI_Recv(&numStart, 1, MPI_INT, Msg::GetCommRank() - 1, 0, MPI_COMM_WORLD,
             &status);
  numTotal = numStart + _localSize;
  if(Msg::GetCommRank() != Msg::GetCommSize() - 1)
    MPI_Send(&numTotal, 1, MPI_INT, Msg::GetCommRank() + 1, 0, MPI_COMM_WORLD);
  MPI_Bcast(&numTotal, 1, MPI_INT, Msg::GetCommSize() - 1, MPI_COMM_WORLD);
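  // Illustrative example: with three ranks owning 3, 5 and 2 dofs, the
  // scan yields numStart = 0, 3 and 8 respectively, and the broadcast
  // from the last rank leaves numTotal = 10 on every rank.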
  for(std::map<Dof, int>::iterator it = unknown.begin(); it != unknown.end();
      it++)
    it->second += numStart;
  int *nRequest = new int[Msg::GetCommSize()];
  int *nRequested = new int[Msg::GetCommSize()];
  for(int i = 0; i < Msg::GetCommSize(); i++) nRequest[i] = 0;
  for(std::map<Dof, std::pair<int, int> >::iterator it = ghostByDof.begin();
      it != ghostByDof.end(); it++) {
    int procId = it->second.first;
    it->second.second = nRequest[procId]++;
  }
  MPI_Alltoall(nRequest, 1, MPI_INT, nRequested, 1, MPI_INT, MPI_COMM_WORLD);
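  // After the all-to-all, nRequest[i] counts the dofs this rank ghosts
  // from rank i, and nRequested[i] the dofs of this rank that rank i
  // ghosts; e.g. if rank 0 ghosts two dofs owned by rank 1, rank 1 sees
  // nRequested[0] == 2.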
  long int **recv0 = new long int *[Msg::GetCommSize()];
  int **recv1 = new int *[Msg::GetCommSize()];
  long int **send0 = new long int *[Msg::GetCommSize()];
  int **send1 = new int *[Msg::GetCommSize()];
  MPI_Request *reqRecv0 = new MPI_Request[2 * Msg::GetCommSize()];
  MPI_Request *reqRecv1 = reqRecv0 + Msg::GetCommSize();
  MPI_Request *reqSend0 = new MPI_Request[Msg::GetCommSize()];
  MPI_Request *reqSend1 = new MPI_Request[Msg::GetCommSize()];
  for(int i = 0; i < Msg::GetCommSize(); i++) {
    send0[i] = new long int[nRequest[i] * 2];
    recv0[i] = new long int[nRequested[i] * 2];
    send1[i] = new int[nRequested[i]];
    recv1[i] = new int[nRequest[i]];
    reqSend0[i] = reqSend1[i] = reqRecv0[i] = reqRecv1[i] = MPI_REQUEST_NULL;
    parentByProc[i].resize(nRequested[i], Dof(0, 0));
    ghostByProc[i].resize(nRequest[i], Dof(0, 0));
  }
  for(int i = 0; i < Msg::GetCommSize(); i++) nRequest[i] = 0;
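  // pack each ghosted dof as an (entity, type) pair of longs; the pair
  // order in send0[proc] matches the dof order in ghostByProc[proc]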
  for(std::map<Dof, std::pair<int, int> >::iterator it = ghostByDof.begin();
      it != ghostByDof.end(); it++) {
    int proc = it->second.first;
    send0[proc][nRequest[proc] * 2] = it->first.getEntity();
    send0[proc][nRequest[proc] * 2 + 1] = it->first.getType();
    ghostByProc[proc][nRequest[proc]] = it->first;
    nRequest[proc]++;
  }
  for(int i = 0; i < Msg::GetCommSize(); i++) {
    if(nRequested[i] > 0) {
      MPI_Irecv(recv0[i], 2 * nRequested[i], MPI_LONG, i, 0, MPI_COMM_WORLD,
                &reqRecv0[i]);
    }
    if(nRequest[i] > 0) {
      // the reply carries one int per ghosted dof; recv1[i] was allocated
      // with nRequest[i] entries, so post the receive with that count
      MPI_Irecv(recv1[i], nRequest[i], MPI_INT, i, 1, MPI_COMM_WORLD,
                &reqRecv1[i]);
      MPI_Isend(send0[i], 2 * nRequest[i], MPI_LONG, i, 0, MPI_COMM_WORLD,
                &reqSend0[i]);
    }
  }
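  // answer each incoming tag-0 request with the global numbers of the
  // matching owned dofs, sent back with tag 1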
  int index;
  while(MPI_Waitany(2 * Msg::GetCommSize(), reqRecv0, &index, &status) ==
          MPI_SUCCESS &&
        index != MPI_UNDEFINED) {
    if(status.MPI_TAG == 0) {
      for(int j = 0; j < nRequested[index]; j++) {
        Dof d(recv0[index][j * 2], recv0[index][j * 2 + 1]);
        std::map<Dof, int>::iterator it = unknown.find(d);
        if(it == unknown.end()) {
          Msg::Error("ghost Dof does not exist on parent process");
          continue; // avoid dereferencing the end iterator
        }
        send1[index][j] = it->second;
        parentByProc[index][j] = d;
      }
      MPI_Isend(send1[index], nRequested[index], MPI_INT, index, 1,
                MPI_COMM_WORLD, &reqSend1[index]);
    }
  }
  for(int i = 0; i < Msg::GetCommSize(); i++) nRequest[i] = 0;
  for(std::map<Dof, std::pair<int, int> >::iterator it = ghostByDof.begin();
      it != ghostByDof.end(); it++) {
    int proc = it->second.first;
    unknown[it->first] = recv1[proc][nRequest[proc]++];
  }
  MPI_Waitall(Msg::GetCommSize(), reqSend0, MPI_STATUSES_IGNORE);
  MPI_Waitall(Msg::GetCommSize(), reqSend1, MPI_STATUSES_IGNORE);
  for(int i = 0; i < Msg::GetCommSize(); i++) {
    delete[] send0[i];
    delete[] send1[i];
    delete[] recv0[i];
    delete[] recv1[i];
  }
  delete[] send0;
  delete[] send1;
  delete[] recv0;
  delete[] recv1;
  delete[] reqSend0;
  delete[] reqSend1;
  delete[] reqRecv0; // also releases reqRecv1, which aliases this block
  delete[] nRequest;
  delete[] nRequested;
#endif
  _parallelFinalized = true;
}
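
// The numbering handshake in _parallelFinalize() is a sequential prefix
// scan. A self-contained sketch of the same idiom in plain MPI (purely
// illustrative; rank, size and localSize are assumed to be defined):
//
//   int numStart = 0, numTotal;
//   MPI_Status st;
//   if(rank > 0)
//     MPI_Recv(&numStart, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD, &st);
//   numTotal = numStart + localSize;
//   if(rank < size - 1)
//     MPI_Send(&numTotal, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD);
//   MPI_Bcast(&numTotal, 1, MPI_INT, size - 1, MPI_COMM_WORLD);
//
// The collective MPI_Exscan(&localSize, &numStart, 1, MPI_INT, MPI_SUM,
// MPI_COMM_WORLD) computes the same offsets in one call (numStart must
// still be zero-initialized on rank 0, where MPI_Exscan leaves the
// result undefined).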