File: recvec.f90

!
! Copyright (C) 2010 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!=----------------------------------------------------------------------------=!
   MODULE gvect
!=----------------------------------------------------------------------------=!

     ! ... variables describing the reciprocal-lattice (G) vectors
     ! ... G vectors with |G|^2 < ecutrho, the cut-off for the charge density
     ! ... With gamma tricks, G vectors are divided into two half-spheres,
     ! ... G> and G<, containing +G and -G respectively (G=0 is in G>)
     ! ... This set is referred to as the "dense" (or "hard", or "thick") grid
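     ! ... For a real field f(r) the coefficients obey f(-G) = conjg(f(G)),
     ! ... which is why one half-sphere suffices. A hedged sketch (not code
     ! ... from this module) of how a sum over the full sphere is recovered
     ! ... from the stored half-sphere G> alone:
     ! ...
     ! ...    s = (0.0_DP, 0.0_DP)
     ! ...    DO ig = 1, ngm
     ! ...       s = s + f(ig)                             ! +G term
     ! ...       IF ( ig >= gstart ) s = s + CONJG(f(ig))  ! -G term (skips G=0)
     ! ...    ENDDO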

     USE kinds, ONLY: DP

     IMPLICIT NONE
     SAVE

     INTEGER :: ngm  = 0  ! local  number of G vectors (on this processor)
                          ! with gamma tricks, only vectors in G>
     INTEGER :: ngm_g= 0  ! global number of G vectors (summed on all procs)
                          ! in serial execution, ngm_g = ngm
     INTEGER :: ngl = 0   ! number of G-vector shells
     INTEGER :: ngmx = 0  ! local number of G vectors, maximum across all procs

     REAL(DP) :: ecutrho = 0.0_DP ! energy cut-off for charge density 
     REAL(DP) :: gcutm = 0.0_DP   ! ecutrho/(2 pi/a)^2, cut-off for |G|^2
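     ! worked example (Rydberg atomic units): for alat = 10 bohr,
     ! tpiba2 = (2*pi/10)**2 = 0.3948 bohr^(-2); ecutrho = 240 Ry then
     ! gives gcutm = 240/0.3948 = 607.9, i.e. only G vectors with
     ! |G|^2 <= gcutm (in tpiba2 units) are kept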

     INTEGER :: gstart = 2 ! index of the first G vector whose modulus is > 0
                           ! Needed in parallel execution: gstart=2 for the
                           ! proc that holds G=0, gstart=1 for all others
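     ! A minimal sketch of the resulting loop idiom (treat_g0 is a
     ! hypothetical placeholder for whatever handles the G=0 term):
     !
     !    IF ( gstart == 2 ) CALL treat_g0 ( )  ! only on the proc owning G=0
     !    DO ig = gstart, ngm
     !       ! terms with 1/gg(ig) etc. are safe here, since |G| > 0
     !    ENDDO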

     !     G^2 in increasing order (in units of tpiba2=(2pi/a)^2)
     !
     REAL(DP), ALLOCATABLE, TARGET :: gg(:) 

     !     gl(i) = i-th shell of G^2 (in units of tpiba2)
     !     igtongl(n) = shell index for n-th G-vector
     !
     REAL(DP), POINTER, PROTECTED            :: gl(:)
     INTEGER, ALLOCATABLE, TARGET, PROTECTED :: igtongl(:)
     !
     !     G-vector Cartesian components (in units of tpiba = 2pi/a)
     !
     REAL(DP), ALLOCATABLE, TARGET :: g(:,:) 

     !     mill = miller index of G vectors (local to each processor)
     !            G(:) = mill(1)*bg(:,1)+mill(2)*bg(:,2)+mill(3)*bg(:,3) 
     !            where bg are the reciprocal lattice basis vectors 
     !
     INTEGER, ALLOCATABLE, TARGET :: mill(:,:)
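     !     e.g. the Cartesian components of the ig-th local G vector can be
     !     rebuilt from its Miller indices as in this sketch (bg(3,3) is
     !     assumed to hold the reciprocal basis vectors in tpiba units):
     !
     !        g(:,ig) =   mill(1,ig) * bg(:,1) &
     !                  + mill(2,ig) * bg(:,2) &
     !                  + mill(3,ig) * bg(:,3)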
     
     !     ig_l2g  = converts a local G-vector index into the global index
     !               ("l2g" means local to global): ig_l2g(i) = index of i-th
     !               local G-vector in the global array of G-vectors
     !
     INTEGER, ALLOCATABLE, TARGET :: ig_l2g(:)
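     !     e.g. a local array f_loc(1:ngm) can be merged into a globally
     !     ordered f_glob(1:ngm_g) as in this sketch (the mp_sum over the
     !     communicator comm completes the gather across processors):
     !
     !        f_glob(:) = 0.0_DP
     !        DO ig = 1, ngm
     !           f_glob( ig_l2g(ig) ) = f_loc(ig)
     !        ENDDO
     !        CALL mp_sum( f_glob, comm )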
     !
     !     mill_g  = miller index of all G vectors
     !
     INTEGER, ALLOCATABLE, TARGET :: mill_g(:,:)
     !
     ! the phases e^{-iG*tau_s} used to calculate structure factors
     !
     COMPLEX(DP), ALLOCATABLE :: eigts1(:,:), eigts2(:,:), eigts3(:,:)
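     ! Since G = mill(1)*bg(:,1) + mill(2)*bg(:,2) + mill(3)*bg(:,3), the
     ! phase e^{-iG*tau_s} factorizes into one term per direction. A hedged
     ! sketch of the assembly (strf and nat are assumptions here, not
     ! entities of this module):
     !
     !    DO na = 1, nat
     !       DO ig = 1, ngm
     !          strf(ig,na) = eigts1( mill(1,ig), na ) * &
     !                        eigts2( mill(2,ig), na ) * &
     !                        eigts3( mill(3,ig), na )
     !       ENDDO
     !    ENDDO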
     !
   CONTAINS

     SUBROUTINE gvect_init( ngm_ , comm )
       !
       ! Set local and global dimensions, allocate arrays
       !
       USE mp, ONLY: mp_max, mp_sum
       IMPLICIT NONE
       INTEGER, INTENT(IN) :: ngm_
       INTEGER, INTENT(IN) :: comm  ! communicator of the group on which g-vecs are distributed
       !
       ngm = ngm_
       !
       !  calculate maximum over all processors
       !
       ngmx = ngm
       CALL mp_max( ngmx, comm )
       !
       !  calculate sum over all processors
       !
       ngm_g = ngm
       CALL mp_sum( ngm_g, comm )
       !
       !  allocate arrays - only those that are always kept until the end
       !
       ALLOCATE( gg(ngm) )
       ALLOCATE( g(3, ngm) )
       ALLOCATE( mill(3, ngm) )
       ALLOCATE( ig_l2g(ngm) )
       ALLOCATE( igtongl(ngm) )
       !
       RETURN 
       !
     END SUBROUTINE gvect_init
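     ! A minimal usage sketch (ngm_local and intra_comm are placeholders
     ! for the local G-vector count and the MPI communicator over which
     ! the G vectors are distributed):
     !
     !    CALL gvect_init( ngm_local, intra_comm )
     !    ! ... fill g, gg, mill, ig_l2g ..., then CALL gshells( .FALSE. )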

     SUBROUTINE deallocate_gvect(vc)
       IMPLICIT NONE
       !
       LOGICAL, OPTIONAL, INTENT(IN) :: vc
       LOGICAL :: vc_
       !
       vc_ = .false.
       IF (PRESENT(vc)) vc_ = vc
       IF ( .NOT. vc_ ) THEN
          IF ( ASSOCIATED( gl ) ) DEALLOCATE ( gl )
       END IF
       !
       IF( ALLOCATED( gg ) ) DEALLOCATE( gg )
       IF( ALLOCATED( g ) )  DEALLOCATE( g )
       IF( ALLOCATED( mill_g ) ) DEALLOCATE( mill_g )
       IF( ALLOCATED( mill ) ) DEALLOCATE( mill )
       IF( ALLOCATED( igtongl ) ) DEALLOCATE( igtongl )
       IF( ALLOCATED( ig_l2g ) ) DEALLOCATE( ig_l2g )
       IF( ALLOCATED( eigts1 ) ) DEALLOCATE( eigts1 )
       IF( ALLOCATED( eigts2 ) ) DEALLOCATE( eigts2 )
       IF( ALLOCATED( eigts3 ) ) DEALLOCATE( eigts3 )
     END SUBROUTINE deallocate_gvect

     SUBROUTINE deallocate_gvect_exx()
       IF( ALLOCATED( gg ) ) DEALLOCATE( gg )
       IF( ALLOCATED( g ) )  DEALLOCATE( g )
       IF( ALLOCATED( mill ) ) DEALLOCATE( mill )
       IF( ALLOCATED( igtongl ) ) DEALLOCATE( igtongl )
       IF( ALLOCATED( ig_l2g ) ) DEALLOCATE( ig_l2g )
     END SUBROUTINE deallocate_gvect_exx
     !
     !-----------------------------------------------------------------------
     SUBROUTINE gshells ( vc )
        !----------------------------------------------------------------------
        !
         ! calculate the number of G shells, ngl, and the map igtongl:
         ! igtongl(ig) is the shell index of the (local) G vector number ig
        !
        USE kinds,              ONLY : DP
        USE constants,          ONLY : eps8
        !
        IMPLICIT NONE
        !
        LOGICAL, INTENT(IN) :: vc
        !
        INTEGER :: ng, igl
        !
        IF ( vc ) THEN
           !
           ! in a variable-cell run each G vector has its own shell
           !
           ngl = ngm
           gl => gg
           DO ng = 1, ngm
              igtongl (ng) = ng
           ENDDO
        ELSE
           !
           ! G vectors are grouped in shells with the same norm
           !
           ngl = 1
           igtongl (1) = 1
           DO ng = 2, ngm
              IF (gg (ng) > gg (ng - 1) + eps8) THEN
                 ngl = ngl + 1
              ENDIF
              igtongl (ng) = ngl
           ENDDO

           ALLOCATE (gl( ngl))
           gl (1) = gg (1)
           igl = 1
           DO ng = 2, ngm
              IF (gg (ng) > gg (ng - 1) + eps8) THEN
                 igl = igl + 1
                 gl (igl) = gg (ng)
              ENDIF
           ENDDO

           IF (igl /= ngl) CALL errore ('gshells', 'igl <> ngl', ngl)

        ENDIF
     END SUBROUTINE gshells
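     ! A hedged sketch of the typical use of the shell structure: evaluate
     ! a function of |G|^2 once per shell, then spread the result to all G
     ! vectors through igtongl (f_of_g2, f_shell, f_g are placeholders):
     !
     !    DO igl = 1, ngl
     !       f_shell(igl) = f_of_g2( gl(igl) )
     !    ENDDO
     !    DO ng = 1, ngm
     !       f_g(ng) = f_shell( igtongl(ng) )
     !    ENDDO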
!=----------------------------------------------------------------------------=!
   END MODULE gvect
!=----------------------------------------------------------------------------=!

!=----------------------------------------------------------------------------=!
   MODULE gvecs
!=----------------------------------------------------------------------------=!
     USE kinds, ONLY: DP

     IMPLICIT NONE
     SAVE

     ! ... G vectors with |G|^2 < 4*ecutwfc, cut-off for wavefunctions
     ! ... ("smooth" grid). Gamma tricks and units as for the "dense" grid
     !
     INTEGER :: ngms = 0  ! local  number of smooth vectors (on this processor)
     INTEGER :: ngms_g=0  ! global number of smooth vectors (summed on procs) 
                          ! in serial execution this is equal to ngms
     INTEGER :: ngsx = 0  ! local number of smooth vectors, max across procs

     REAL(DP) :: ecuts = 0.0_DP   ! energy cut-off = 4*ecutwfc
     REAL(DP) :: gcutms= 0.0_DP   ! ecuts/(2 pi/a)^2, cut-off for |G|^2

     REAL(DP) :: dual = 0.0_DP    ! ecutrho=dual*ecutwfc
     LOGICAL  :: doublegrid = .FALSE. ! true if smooth and dense grid differ
                                      ! doublegrid = (dual > 4)
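     ! worked example (illustrative numbers): with ecutwfc = 30 Ry and the
     ! default dual = 4, ecutrho = 120 Ry and doublegrid = .FALSE. (the two
     ! grids coincide); with ultrasoft pseudopotentials one typically sets
     ! dual = 8-12, e.g. dual = 10 gives ecutrho = 300 Ry, doublegrid = .TRUE.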

   CONTAINS

     SUBROUTINE gvecs_init( ngs_ , comm )
       USE mp, ONLY: mp_max, mp_sum
       IMPLICIT NONE
       INTEGER, INTENT(IN) :: ngs_
       INTEGER, INTENT(IN) :: comm  ! communicator of the group on which g-vecs are distributed
       !
       ngms = ngs_
       !
       !  calculate maximum over all processors
       !
       ngsx = ngms
       CALL mp_max( ngsx, comm )
       !
       !  calculate sum over all processors
       !
       ngms_g = ngms
       CALL mp_sum( ngms_g, comm )
       !
       !  nothing is allocated here any longer; the former smooth-grid
       !  index arrays below are kept, commented out, for reference
       !
       ! ALLOCATE( nls (ngms) )
       ! ALLOCATE( nlsm(ngms) )
       !
       RETURN 
       !
     END SUBROUTINE gvecs_init

!=----------------------------------------------------------------------------=!
   END MODULE gvecs
!=----------------------------------------------------------------------------=!