File: mp_pools.f90

!
! Copyright (C) 2013 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!----------------------------------------------------------------------------
MODULE mp_pools
  !----------------------------------------------------------------------------
  !
  USE mp, ONLY : mp_barrier, mp_size, mp_rank, mp_comm_split
  USE parallel_include
  !
  IMPLICIT NONE 
  SAVE
  !
  ! ... Pool groups (processors within a pool of k-points)
  ! ... Subdivision of image group, used for k-point parallelization 
  !
  INTEGER :: npool       = 1  ! number of k-point pools
  INTEGER :: nproc_pool  = 1  ! number of processors within a pool
  INTEGER :: me_pool     = 0  ! index of the processor within a pool
  INTEGER :: root_pool   = 0  ! index of the root processor within a pool
  INTEGER :: my_pool_id  = 0  ! index of my pool
  INTEGER :: inter_pool_comm  = 0  ! inter-pool communicator
  INTEGER :: intra_pool_comm  = 0  ! intra-pool communicator
  !
  INTEGER :: kunit = 1  ! granularity of the k-point distribution:
                        ! kunit=1 in the standard case; in phonon
                        ! calculations k and k+q must be in the same
                        ! pool, so kunit=2 is used there.
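                        ! For illustration (the distribution itself is
                        ! performed elsewhere, e.g. in divide_et_impera):
                        ! with kunit=2 the k-points are assigned to pools
                        ! in consecutive pairs, so each (k, k+q) pair
                        ! ends up inside a single pool.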
  ! 
CONTAINS
  !
  !----------------------------------------------------------------------------
  SUBROUTINE mp_start_pools( npool_, parent_comm )
    !---------------------------------------------------------------------------
    !
    ! ... Divide processors (of the "parent_comm" group) into "pools"
    ! ... Requires: npool_, read from command line
    ! ...           parent_comm, typically world_comm = group of all processors
    !
    IMPLICIT NONE
    !
    INTEGER, INTENT(IN) :: npool_, parent_comm
    !
    INTEGER :: parent_nproc = 1, parent_mype  = 0
    !
#if defined (__MPI)
    !
    parent_nproc = mp_size( parent_comm )
    parent_mype  = mp_rank( parent_comm )
    !
    ! ... npool_ must have been previously read from command line argument
    ! ... by a call to routine get_command_line
    !
    npool = npool_
    IF ( npool < 1 .OR. npool > parent_nproc ) CALL errore( 'mp_start_pools',&
                          'invalid number of pools, out of range', 1 )

    IF ( MOD( parent_nproc, npool ) /= 0 ) CALL errore( 'mp_start_pools', &
           'invalid number of pools, parent_nproc /= nproc_pool * npool', 1 )  
    !
    ! ... number of cpus per pool of k-points (created inside each parent group)
    !
    nproc_pool = parent_nproc / npool
    !
    !
    ! ... my_pool_id  =  pool index for this processor    ( 0 : npool - 1 )
    ! ... me_pool     =  processor index within the pool  ( 0 : nproc_pool - 1 )
    !
    my_pool_id = parent_mype / nproc_pool    
    me_pool    = MOD( parent_mype, nproc_pool )
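    !
    ! ... For illustration: with parent_nproc = 8 and npool = 2 one has
    ! ... nproc_pool = 4 and the ranks are grouped contiguously,
    ! ...   ranks 0-3 -> pool 0,  ranks 4-7 -> pool 1;
    ! ... e.g. rank 5 gets my_pool_id = 5/4 = 1 and me_pool = MOD(5,4) = 1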
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the intra_pool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, my_pool_id, parent_mype, intra_pool_comm )
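    !
    ! ... (mp_comm_split wraps MPI_Comm_split: ranks with the same
    ! ... "color" my_pool_id share the new communicator, ordered by
    ! ... the "key" parent_mype)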
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the inter_pool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, me_pool, parent_mype, inter_pool_comm )
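    !
    ! ... (here the color is me_pool, so processors occupying the same
    ! ... position in different pools are grouped together: in the
    ! ... 8-rank example above, ranks 1 and 5, both with me_pool = 1,
    ! ... share one inter-pool communicator)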
    !
#endif
    !
    RETURN
  END SUBROUTINE mp_start_pools
  !
END MODULE mp_pools
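!
! A minimal usage sketch (editor's illustration, not part of the original
! file). It assumes QE's mp_global module, whose mp_startup reads -npool
! from the command line and calls mp_start_pools, as done by the QE main
! programs:
!
!   PROGRAM pools_demo
!     USE mp_global, ONLY : mp_startup, mp_global_end
!     USE mp_pools,  ONLY : my_pool_id, me_pool
!     IMPLICIT NONE
!     CALL mp_startup( )    ! init MPI, then split the world into pools
!     PRINT *, 'pool:', my_pool_id, ' rank within pool:', me_pool
!     CALL mp_global_end( ) ! close the communicators and finalize MPI
!   END PROGRAM pools_demo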



!----------------------------------------------------------------------------
MODULE mp_orthopools
  !----------------------------------------------------------------------------
  !
  USE mp, ONLY : mp_barrier, mp_size, mp_rank, mp_comm_split
  USE mp_pools
  USE parallel_include
  !
  IMPLICIT NONE 
  SAVE
  !
  ! ... Ortho-pool groups: each orthopool collects the (n+1)-th CPU of each pool,
  ! i.e. orthopool 0 -> first CPU of each pool
  !      orthopool 1 -> second CPU of each pool
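  !
  ! For illustration: with npool = 2 pools of nproc_pool = 4 CPUs there
  ! are northopool = 4 orthopools of nproc_orthopool = 2 CPUs each;
  ! orthopool 1 contains world ranks 1 and 5, i.e. the second CPU of
  ! each pool.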
  !
  INTEGER :: northopool       = 1  ! number of orthopools; must be equal to nproc_pool
  INTEGER :: nproc_orthopool  = 1  ! number of processors within an orthopool; must be equal to npool
  INTEGER :: me_orthopool     = 0  ! index of the processor within an orthopool;
                                   ! equal to the pool index (my_pool_id) of that CPU
  INTEGER :: root_orthopool   = 0  ! index of the root processor within an orthopool
  INTEGER :: my_orthopool_id  = 0  ! index of my orthopool
  INTEGER :: inter_orthopool_comm  = 0  ! inter-orthopool communicator
  INTEGER :: intra_orthopool_comm  = 0  ! intra-orthopool communicator
  !
  LOGICAL,PRIVATE :: init_orthopools = .false.
  ! 
CONTAINS
  !
  !----------------------------------------------------------------------------
  SUBROUTINE mp_stop_orthopools( )
    USE mp, ONLY : mp_comm_free
    IMPLICIT NONE
    ! Free the orthopools communicators (if they had been set up)
    IF(init_orthopools) THEN
      CALL mp_comm_free ( inter_orthopool_comm )
      CALL mp_comm_free ( intra_orthopool_comm )
      init_orthopools = .false.
    ENDIF
    !
    RETURN
  END SUBROUTINE mp_stop_orthopools
  !
  !----------------------------------------------------------------------------
  SUBROUTINE mp_start_orthopools( parent_comm )
    !---------------------------------------------------------------------------
    !
    ! ... Divide processors (of the "parent_comm" group) into "orthopools"
    ! ... Requires: pools being already initialized
    ! ...           parent_comm, typically world_comm = group of all processors
    !
    IMPLICIT NONE
    !
    INTEGER, INTENT(IN) :: parent_comm
    !
    INTEGER :: parent_nproc = 1, parent_mype  = 0
    !
    ! Initialise the orthopools only once: their initialisation is done
    ! late (during the EXX bootstrap, not at startup), so this routine
    ! may be called more than once.
    IF(init_orthopools) RETURN
    init_orthopools = .true.
    !
#if defined (__MPI)
    !
    parent_nproc = mp_size( parent_comm )
    parent_mype  = mp_rank( parent_comm )
    !
    northopool = nproc_pool
    !
    ! ... number of cpus per orthopool 
    nproc_orthopool = npool
    !
    !
    ! ... my_orthopool_id  =  orthopool index for this processor    ( 0 : northopool - 1 )
    ! ... me_orthopool     =  processor index within the orthopool  ( 0 : nproc_orthopool - 1 )
    my_orthopool_id = MOD(parent_mype, northopool)
    me_orthopool    = my_pool_id
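    !
    ! ... For illustration, continuing the 8-rank example above
    ! ... (npool = 2, nproc_pool = northopool = 4): rank 5 gets
    ! ... my_orthopool_id = MOD(5,4) = 1 and me_orthopool = my_pool_id = 1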
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the intra_orthopool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, my_orthopool_id, parent_mype, intra_orthopool_comm )
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the inter_orthopool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, me_orthopool, parent_mype, inter_orthopool_comm )
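    !
    ! ... (the color here is me_orthopool = my_pool_id, so each
    ! ... inter-orthopool communicator spans exactly the CPUs of one pool)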
    !
#endif
    !
    RETURN
  END SUBROUTINE mp_start_orthopools
  !
END MODULE mp_orthopools
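!
! Usage sketch (editor's illustration, not part of the original file):
! once the pools exist, orthopools can be set up on demand and freed
! afterwards, e.g.
!
!   USE mp_world,      ONLY : world_comm
!   USE mp_orthopools, ONLY : mp_start_orthopools, mp_stop_orthopools
!   ...
!   CALL mp_start_orthopools( world_comm )  ! no-op if already initialized
!   ! ... collective work over intra/inter orthopool communicators ...
!   CALL mp_stop_orthopools( )              ! free the communicators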