File: comm.cpp

/////////////////////////////////////////////////////////////
//                                                         //
// Copyright (c) 2003-2014 by The University of Queensland //
// Centre for Geoscience Computing                         //
// http://earth.uq.edu.au/centre-geoscience-computing      //
//                                                         //
// Primary Business: Brisbane, Queensland, Australia       //
// Licensed under the Open Software License version 3.0    //
// http://www.apache.org/licenses/LICENSE-2.0              //
//                                                         //
/////////////////////////////////////////////////////////////


#include "tml/comm/comm.h"
#include "Foundation/console.h"

//--- I/O ---

/*!
  Default constructor for TML_Comm. Sets the MPI communicator
  to MPI_COMM_NULL.
*/
TML_Comm::TML_Comm()  
{
  m_comm=MPI_COMM_NULL;
}

/*!
  Construct a TML_Comm from an MPI communicator.

  \param comm the MPI communicator
*/
TML_Comm::TML_Comm(MPI_Comm comm)
{
  m_comm=comm;
}
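
/*
  Usage sketch (illustrative only; everything outside the TML_Comm call
  is hypothetical surrounding code): wrapping the global communicator
  after MPI initialisation.

    MPI_Init(&argc, &argv);
    TML_Comm world(MPI_COMM_WORLD);  // wrap MPI_COMM_WORLD
    // ... use world ...
    MPI_Finalize();
*/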

/*!
  Set the underlying MPI communicator.

  \param comm the MPI communicator
*/
void TML_Comm::setComm(MPI_Comm comm)
{
  m_comm=comm;
}

TML_Comm& TML_Comm::operator=(const TML_Comm& rhs)
{
  //  MPI_Comm_dup(rhs.m_comm,&m_comm);
  m_comm=rhs.m_comm;

  return *this;
}

int TML_Comm::rank()  const
{
  int rank;
  
  if(m_comm!=MPI_COMM_NULL){
    MPI_Comm_rank(m_comm,&rank);
  } else {
    rank=MPI_UNDEFINED;
  }

  return rank;
}

int TML_Comm::size()
{
  int size;

  if(m_comm!=MPI_COMM_NULL){
    MPI_Comm_size(m_comm,&size);
  } else {
    size=MPI_UNDEFINED;
  }

  return size;
}

/*!
  Construct a new communicator containing the processes
  given as input.

  \param ids the ranks (in the current communicator) of the processes which form the new communicator
  \todo error handling
*/
TML_Comm TML_Comm::include(const vector<int>& ids)
{
  TML_Comm newcomm;
  MPI_Group grp,ngrp;

  // extract group
  MPI_Comm_group(m_comm,&grp);
  // vector->array
  int nids=ids.size();
  int *ranks=new int[nids];
  for(int i=0;i<nids;i++){
    ranks[i]=ids[i];
  }
  // make new group
  int err=MPI_Group_incl(grp,nids,ranks,&ngrp);
  if(err!=MPI_SUCCESS){
    console.Error() << "Error in TML_Comm::include group construction, rank "
                    << rank() << " error " << err << "\n";
  }
  // query size and rank within the new group (results currently unused)
  int gsize,grnk;
  MPI_Group_size(ngrp,&gsize);
  MPI_Group_rank(ngrp,&grnk);
  delete[] ranks; // array delete to match new[]
  // construct new MPI communicator
  err=MPI_Comm_create(m_comm,ngrp,&(newcomm.m_comm));

  if(err!=MPI_SUCCESS){
    console.Error() << "Error in TML_Comm::include communicator construction, rank "
                    << rank() << " error " << err << "\n";
  }

  return newcomm;
}
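
/*
  Usage sketch for include() (illustrative only; 'world' and 'workers'
  are hypothetical): build a sub-communicator holding the worker ranks
  1..size-1. Note that MPI_Comm_create is collective, so all ranks of
  the parent communicator must make this call; ranks not listed in
  'ids' end up with MPI_COMM_NULL in the returned TML_Comm.

    std::vector<int> workers;
    for(int i=1;i<world.size();i++) workers.push_back(i);
    TML_Comm worker_comm = world.include(workers);
*/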

/*!
  Construct a new communicator containing the processes from the current communicator
  except the ones given as input.

  \param ids the ranks of the processes which are excluded from the new communicator
  \todo error handling
*/
TML_Comm TML_Comm::exclude(const vector<int>& ids)
{
  TML_Comm newcomm;
  MPI_Group grp,ngrp;

  // extract group
  MPI_Comm_group(m_comm,&grp);
  // vector->array
  int nids=ids.size();
  int *ranks=new int[nids];
  for(int i=0;i<nids;i++){
    ranks[i]=ids[i];
  }
  // make new group
  MPI_Group_excl(grp,nids,ranks,&ngrp);
  delete[] ranks; // array delete to match new[]
  // construct new MPI communicator
  MPI_Comm_create(m_comm,ngrp,&(newcomm.m_comm));

  return newcomm;
}
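
/*
  Usage sketch for exclude() (illustrative only; 'world' is
  hypothetical): the complement of the include() example above,
  dropping rank 0 (the master) so that only the workers remain.

    std::vector<int> master(1, 0);
    TML_Comm worker_comm = world.exclude(master);
*/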

/*!
  Wait on a barrier. Wrapper for MPI_Barrier.
*/
void TML_Comm::barrier()
{
  MPI_Barrier(m_comm);
}

/*!
  Wait on a barrier, printing debug messages with timing information.
*/
void TML_Comm::barrier(const string& msg)
{
  double m_time=MPI_Wtime();
  int id=rank();
  if(id==0){
    console.Debug() << "Master waiting on Barrier ( " << msg << " )\n";
  } else {
    console.Debug() << "Worker " << id << " waiting on Barrier ( " << msg << " )\n";
  }

  MPI_Barrier(m_comm);
  double p_time=MPI_Wtime();
  if(id==0){
    console.Debug() << "Master past Barrier ( " << msg << " ) after " << p_time-m_time << " sec \n";
  } else {
    console.Debug() << "Worker " << id << " past Barrier ( " << msg << " ) after " << p_time-m_time << " sec \n";
  }
}
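
/*
  Usage sketch for the labelled barrier (illustrative only; 'comm' is
  hypothetical): the string is purely a debug label written via the
  console, it does not change the synchronisation behaviour.

    comm.barrier("end of timestep");  // all ranks log, wait, then log again
*/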