File: ftst_parallel.F

!     This is part of the netCDF package.
!     Copyright 2006 University Corporation for Atmospheric Research/Unidata.
!     See COPYRIGHT file for conditions of use.

!     This program tests netCDF-4 parallel I/O from Fortran 77.

!     Ed Hartnett

      program ftst_parallel
      implicit none
      include 'netcdf.inc'
      include 'mpif.h'

      integer mode_flag ! file create mode
      integer p, my_rank, ierr

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, p, ierr)

      if (my_rank .eq. 0) then
         print *, ' '
         print *, '*** Testing netCDF-4 parallel I/O from Fortran 77.'
      endif

!     There must be 4 procs for this test.
      if (p .ne. 4) then
         print *, 'This test program must be run on 4 processors.'
         stop 2
      endif

#ifdef NF_HAS_PNETCDF
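!     Test parallel I/O on a classic-format file written through
!     pnetcdf.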
      mode_flag = IOR(nf_clobber, nf_mpiio)
      call parallel_io(mode_flag)
#endif

#ifdef NF_HAS_PARALLEL4
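!     Test parallel I/O on a netCDF-4/HDF5 file restricted to the
!     classic data model.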
      mode_flag = IOR(nf_netcdf4, nf_classic_model)
      mode_flag = IOR(mode_flag, nf_clobber)
      mode_flag = IOR(mode_flag, nf_mpiio)
      call parallel_io(mode_flag)
#endif

      call MPI_Finalize(ierr)

      if (my_rank .eq. 0) print *,'*** SUCCESS!'

      end program ftst_parallel

      subroutine parallel_io(mode_flag)
      implicit none
      include 'netcdf.inc'
      include 'mpif.h'

      integer mode_flag ! file create mode

      character*(*) FILE_NAME
      parameter (FILE_NAME = 'ftst_parallel.nc')

      integer MAX_DIMS
      parameter (MAX_DIMS = 2)
      integer NX, NY
      parameter (NX = 16)
      parameter (NY = 16)
      integer NUM_PROC
      parameter (NUM_PROC = 4)
      integer ncid, varid, dimids(MAX_DIMS)
      integer x_dimid, y_dimid
      integer data_out(NY / 2, NX / 2), data_in(NY / 2, NX / 2)
      integer x, y, retval
      integer my_rank, ierr
      integer start(MAX_DIMS), count(MAX_DIMS)

      call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)

!     Create some pretend data.
      do x = 1, NX / 2
         do y = 1, NY / 2
            data_out(y, x) = my_rank
         end do
      end do

!     Create the netCDF file.
      retval = nf_create_par(FILE_NAME, mode_flag, MPI_COMM_WORLD,
     $     MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 3

!     Define the dimensions.
      retval = nf_def_dim(ncid, "x", NX, x_dimid)
      if (retval .ne. nf_noerr) stop 4
      retval = nf_def_dim(ncid, "y", NY, y_dimid)
      if (retval .ne. nf_noerr) stop 5
      dimids(1) = y_dimid
      dimids(2) = x_dimid
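!     Note that in the Fortran API the first dimension varies fastest,
!     so dimids(1) (y) is the fastest-varying dimension of the data.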

!     Define the variable.
      retval = nf_def_var(ncid, "data", NF_INT, MAX_DIMS, dimids, varid)
      if (retval .ne. nf_noerr) stop 6

!     With a classic-model netCDF-4 file, nf_enddef must be called.
      retval = nf_enddef(ncid)
      if (retval .ne. nf_noerr) stop 7

!     Determine what part of the variable will be written for this
!     processor. It's a checkerboard decomposition.
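!     With NX = NY = 16, each rank handles an 8 x 8 quadrant:
!        rank 0: start (1,1)    rank 1: start (9,1)
!        rank 2: start (1,9)    rank 3: start (9,9)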
      count(1) = NY / 2
      count(2) = NX / 2
      if (my_rank .eq. 0) then
         start(1) = 1
         start(2) = 1
      else if (my_rank .eq. 1) then
         start(1) = NY / 2 + 1
         start(2) = 1
      else if (my_rank .eq. 2) then
         start(1) = 1
         start(2) = NX / 2 + 1
      else if (my_rank .eq. 3) then
         start(1) = NY / 2 + 1
         start(2) = NX / 2 + 1
      endif

!     Write this processor's data.
      retval = nf_put_vara_int(ncid, varid, start, count, data_out)
      if (retval .ne. nf_noerr) stop 8

!     Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 9

!     Reopen the file.
      retval = nf_open_par(FILE_NAME, IOR(nf_nowrite, nf_mpiio),
     $     MPI_COMM_WORLD, MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 10

!     Set collective access on this variable. This will cause all
!     reads/writes to happen together on every processor. Fairly
!     pointless in this context, but we want to exercise this
!     function at least once in the tests.
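!     (The alternative is independent access, nf_independent.)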
      retval = nf_var_par_access(ncid, varid, nf_collective)
      if (retval .ne. nf_noerr) stop 11

!     Read this processor's data.
      retval = nf_get_vara_int(ncid, varid, start, count, data_in)
      if (retval .ne. nf_noerr) stop 12

!     Check the data.
      do x = 1, NX / 2
         do y = 1, NY / 2
            if (data_in(y, x) .ne. my_rank) stop 13
         end do
      end do

!     Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 14

      end subroutine parallel_io