File: ftst_parallel_nasa.F

!     This is part of the netCDF package. Copyright 2011 University
!     Corporation for Atmospheric Research/Unidata. See COPYRIGHT file
!     for conditions of use. 

!     This program tests netCDF-4 parallel I/O from fortran. This
!     variation of the test was contributed by Dan at NASA. Thanks Dan!

      program ftst_parallel_nasa
      implicit none
      include 'netcdf.inc'
      include 'mpif.h'
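!     Note: nf_create_par, nf_open_par, and nf_var_par_access are
!     only usable when netCDF-Fortran is built against a
!     parallel-enabled netCDF-C/HDF5 stack, so this test assumes
!     such a build.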

      character*(*) FILE_NAME
      parameter (FILE_NAME = 'ftst_parallel_nasa.nc')

      integer MAX_DIMS
      parameter (MAX_DIMS = 2)
      integer NX, NY
      parameter (NX = 16)
      parameter (NY = 16)
      integer NUM_PROC
      parameter (NUM_PROC = 4)
      integer ncid, varid, dimids(MAX_DIMS)
      integer x_dimid, y_dimid
      real data_out(NY / 2, NX / 2), data_in(NY / 2, NX / 2)
      integer mode_flag
      integer x, y, retval
      integer p, my_rank, ierr
      integer start(MAX_DIMS), count(MAX_DIMS)

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, p, ierr)

      if (my_rank .eq. 0) then
         print *, ' '
         print *, '*** Testing netCDF-4 parallel I/O from F77 again.'
      endif

!     There must be 4 procs for this test.
      if (p .ne. 4) then
         print *, 'This test program must be run on 4 processors.'
         stop 2
      endif
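!     A typical way to launch it (the exact launcher depends on the
!     MPI implementation) is something like:
!        mpiexec -n 4 ./ftst_parallel_nasa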

!     Create some pretend data.
      do x = 1, NX / 2
         do y = 1, NY / 2
            data_out(y, x) = real(my_rank)
         end do
      end do
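!     Each rank fills its NY/2 x NX/2 quadrant with its own rank
!     number, so the read-back check below can verify that every
!     rank sees exactly the data it wrote.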

!     Create the netCDF file.
      mode_flag = IOR(nf_netcdf4, nf_clobber)
!     mode_flag = IOR(nf_netcdf4, nf_classic_model)
      mode_flag = IOR(mode_flag, nf_mpiio)
      retval = nf_create_par(FILE_NAME, mode_flag, MPI_COMM_WORLD,
     $     MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 3
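!     nf_create_par is a collective call: all four ranks create the
!     same shared file and each receives its own ncid for it.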

!     Define the dimensions.
      retval = nf_def_dim(ncid, "x", NX, x_dimid)
      if (retval .ne. nf_noerr) stop 4
      retval = nf_def_dim(ncid, "y", NY, y_dimid)
      if (retval .ne. nf_noerr) stop 5
      dimids(1) = x_dimid
      dimids(2) = y_dimid

!     Define the variable.
      retval = nf_def_var(ncid, "data", NF_FLOAT, MAX_DIMS, dimids,
     $     varid)
      if (retval .ne. nf_noerr) stop 6

!     With a classic model netCDF-4 file, enddef must be called.
      retval = nf_enddef(ncid)
      if (retval .ne. nf_noerr) stop 7

!     Determine what part of the variable will be written for this
!     processor. It's a checkerboard decomposition.
      count(1) = NX / 2
      count(2) = NY / 2
      if (my_rank .eq. 0) then
         start(1) = 1
         start(2) = 1
      else if (my_rank .eq. 1) then
         start(1) = NX / 2 + 1
         start(2) = 1
      else if (my_rank .eq. 2) then
         start(1) = 1
         start(2) = NY / 2 + 1
      else if (my_rank .eq. 3) then
         start(1) = NX / 2 + 1
         start(2) = NY / 2 + 1
      endif
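!     The resulting quadrant layout (x = start(1), y = start(2)):
!        rank 0: x 1..8,  y 1..8     rank 1: x 9..16, y 1..8
!        rank 2: x 1..8,  y 9..16    rank 3: x 9..16, y 9..16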

!     Write this processor's data.
      retval = nf_put_vara_real(ncid, varid, start, count, data_out)
      if (retval .ne. nf_noerr) then
         print*,'Error writing data ', retval
         print*, NF_STRERROR(retval)
         stop 8
      endif
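!     No nf_var_par_access call was made before this write, so it
!     uses the library default (independent access); each rank
!     writes its quadrant without coordinating with the others.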

!     Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 9

!     Reopen the file.
      retval = nf_open_par(FILE_NAME, IOR(nf_nowrite, nf_mpiio),
     $     MPI_COMM_WORLD, MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 10

!     Set collective access on this variable. This will cause all
!     reads/writes to happen together on every processor. Fairly
!     pointless, in this context, but I want to at least call this
!     function once in my testing.
      retval = nf_var_par_access(ncid, varid, nf_collective)
      if (retval .ne. nf_noerr) stop 11

!     Read this processor's data.
      retval = nf_get_vara_real(ncid, varid, start, count, data_in)
      if (retval .ne. nf_noerr) stop 12

!     Check the data.
      do x = 1, NX / 2
         do y = 1, NY / 2
            if (data_in(y, x) .ne. my_rank) then
               print*,data_in(y, x), ' NE ', my_rank
               stop 13
            endif
         end do
      end do

!     Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 14

      call MPI_Finalize(ierr)

      if (my_rank .eq. 0) print *,'*** SUCCESS!'

      end program ftst_parallel_nasa