! This is part of the netCDF package.
! Copyright 2006 University Corporation for Atmospheric Research/Unidata.
! See COPYRIGHT file for conditions of use.
! This program tests netCDF-4 parallel I/O functions from Fortran.
! We are writing 2D data, a 16 x 16 grid, on 4 processors. Each
! processor will write its rank to its quarter of the array. The
! result will be (in CDL):
!
! netcdf f90tst_parallel {
! dimensions:
! x = 16 ;
! y = 16 ;
! variables:
! int data(x, y) ;
! data:
!
! data =
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
! 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 ;
! }
! Ed Hartnett
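!
! This test is normally launched with an MPI job launcher on 4 processes,
! for example something like: mpiexec -n 4 ./f90tst_parallel (the exact
! launcher and executable name depend on the build and MPI implementation).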
program f90tst_parallel
use netcdf
implicit none
include 'mpif.h'
integer :: mode_flag
integer :: p, my_rank, ierr
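! Initialize MPI and get this process's rank and the total number of processes.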
call MPI_Init(ierr)
call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)
call MPI_Comm_size(MPI_COMM_WORLD, p, ierr)
if (my_rank .eq. 0) then
  print *, ' '
  print *, '*** Testing netCDF-4 parallel I/O from Fortran 90.'
endif
! There must be 4 procs for this test.
if (p .ne. 4) then
  print *, 'Sorry, this test program must be run on four processors.'
  stop 2
endif
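! Exercise each parallel I/O path the library was built with: classic-format
! I/O through pnetcdf (NF_HAS_PNETCDF) and/or netCDF-4/HDF5 parallel I/O
! (NF_HAS_PARALLEL4).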
#ifdef NF_HAS_PNETCDF
mode_flag = IOR(nf90_clobber, nf90_mpiio)
call parallel_io(mode_flag)
#endif
#ifdef NF_HAS_PARALLEL4
mode_flag = IOR(nf90_netcdf4, nf90_classic_model)
mode_flag = IOR(mode_flag, nf90_clobber)
mode_flag = IOR(mode_flag, nf90_mpiio)
call parallel_io(mode_flag)
#endif
call MPI_Finalize(ierr)
if (my_rank .eq. 0) print *,'*** SUCCESS!'
contains
! This subroutine handles errors by printing an error message and
! exiting with a non-zero status.
subroutine handle_err(errcode)
use netcdf
implicit none
integer, intent(in) :: errcode
if (errcode /= nf90_noerr) then
  print *, 'Error: ', trim(nf90_strerror(errcode))
  stop 2
endif
end subroutine handle_err
subroutine parallel_io(mode_flag)
use typeSizes
use netcdf
implicit none
include 'mpif.h'
integer, intent(in) :: mode_flag
! This is the name of the data file we will create.
character (len = *), parameter :: FILE_NAME = "f90tst_parallel.nc"
integer, parameter :: MAX_DIMS = 2
integer, parameter :: NX = 16, NY = 16
integer, parameter :: NUM_PROC = 4
integer :: ncid, varid, dimids(MAX_DIMS)
integer :: x_dimid, y_dimid
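! Each rank handles its own (NY/2) x (NX/2) quarter of the full NY x NX grid.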
integer :: data_out(NY / 2, NX / 2), data_in(NY / 2, NX / 2)
integer :: nvars, ngatts, ndims, unlimdimid, file_format
integer :: x, y
integer :: my_rank, ierr
integer :: start(MAX_DIMS), count(MAX_DIMS)
call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)
! Create some pretend data.
do x = 1, NX / 2
  do y = 1, NY / 2
    data_out(y, x) = my_rank
  end do
end do
! Create the netCDF file.
call handle_err(nf90_create(FILE_NAME, mode_flag, ncid, comm = MPI_COMM_WORLD, &
     info = MPI_INFO_NULL))
! Define the dimensions.
call handle_err(nf90_def_dim(ncid, "x", NX, x_dimid))
call handle_err(nf90_def_dim(ncid, "y", NY, y_dimid))
dimids = (/ y_dimid, x_dimid /)
! Define the variable.
call handle_err(nf90_def_var(ncid, "data", NF90_INT, dimids, varid))
! With a classic model netCDF-4 file, nf90_enddef must be called.
call handle_err(nf90_enddef(ncid))
! Determine which part of the variable this processor will write.
! Each rank writes one quadrant of the 2 x 2 decomposition.
count = (/ NX / 2, NY / 2 /)
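! Start indices are 1-based in the Fortran-90 API.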
if (my_rank .eq. 0) then
  start = (/ 1, 1 /)
else if (my_rank .eq. 1) then
  start = (/ NX / 2 + 1, 1 /)
else if (my_rank .eq. 2) then
  start = (/ 1, NY / 2 + 1 /)
else if (my_rank .eq. 3) then
  start = (/ NX / 2 + 1, NY / 2 + 1 /)
endif
! Write this processor's data.
call handle_err(nf90_put_var(ncid, varid, data_out, start = start, count = count))
! Close the file.
call handle_err(nf90_close(ncid))
! Reopen the file.
call handle_err(nf90_open(FILE_NAME, IOR(nf90_nowrite, nf90_mpiio), ncid, &
     comm = MPI_COMM_WORLD, info = MPI_INFO_NULL))
! Check that the file metadata matches what was written.
call handle_err(nf90_inquire(ncid, ndims, nvars, ngatts, unlimdimid, file_format))
if (ndims /= 2 .or. nvars /= 1 .or. ngatts /= 0 .or. unlimdimid /= -1) stop 3
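! The expected on-disk format depends on the creation mode: netCDF-4
! classic model for the HDF5 path, classic format for the pnetcdf path.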
if (IAND(mode_flag, nf90_netcdf4) .GT. 0) then
  if (file_format /= nf90_format_netcdf4_classic) stop 4
else
  if (file_format /= nf90_format_classic) stop 5
endif
! Set collective access on this variable. This will cause all
! reads/writes to happen together on every processor. That is not
! strictly necessary in this context, but it exercises the function
! at least once in this test.
call handle_err(nf90_var_par_access(ncid, varid, nf90_collective))
! Read this processor's data.
call handle_err(nf90_get_var(ncid, varid, data_in, start = start, count = count))
! Check the data.
do x = 1, NX / 2
  do y = 1, NY / 2
    if (data_in(y, x) .ne. my_rank) stop 6
  end do
end do
! Close the file.
call handle_err(nf90_close(ncid))
end subroutine parallel_io
end program f90tst_parallel