! This is part of the netCDF package.
! Copyright 2006 University Corporation for Atmospheric Research/Unidata.
! See COPYRIGHT file for conditions of use.
! This program tests netCDF-4 parallel I/O from Fortran.
! $Id: ftst_parallel.F,v 1.3 2009/01/25 15:47:47 ed Exp $
      program ftst_parallel
      USE netcdf4_f03
      implicit none
      include 'mpif.h'
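! (The netcdf4_f03 module supplies the nf_* functions and constants
! used below; mpif.h supplies the MPI interface.)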
      character*(*) FILE_NAME
      parameter (FILE_NAME = 'ftst_parallel.nc')
      integer MAX_DIMS
      parameter (MAX_DIMS = 2)
      integer NX, NY
      parameter (NX = 16)
      parameter (NY = 16)
      integer NUM_PROC
      parameter (NUM_PROC = 4)
      integer ncid, varid, dimids(MAX_DIMS)
      integer x_dimid, y_dimid, contig
      integer data_out(NY / 2, NX / 2), data_in(NY / 2, NX / 2)
      integer mode_flag
      integer nvars, ngatts, ndims, unlimdimid, file_format
      integer x, y, retval
      integer p, my_rank, ierr
      integer start(MAX_DIMS), count(MAX_DIMS)
      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, p, ierr)
      if (my_rank .eq. 0) then
         print *, ' '
         print *, '*** Testing netCDF-4 parallel I/O from Fortran 77.'
      endif
! There must be 4 procs for this test.
      if (p .ne. 4) then
         print *, 'This test program must be run on 4 processors.'
         stop 2
      endif
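! (The four ranks map one-to-one onto the four quadrants of the
! checkerboard decomposition used below.)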
! Create some pretend data.
      do x = 1, NX / 2
         do y = 1, NY / 2
            data_out(y, x) = my_rank
         end do
      end do
! Create the netCDF file.
      mode_flag = IOR(nf_netcdf4, nf_classic_model)
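! (nf_netcdf4 selects the HDF5-based format, which is what enables
! parallel I/O here; nf_classic_model restricts the file to the
! classic netCDF data model.)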
      retval = nf_create_par(FILE_NAME, mode_flag, MPI_COMM_WORLD,
     $     MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 2
! Define the dimensions.
      retval = nf_def_dim(ncid, "x", NX, x_dimid)
      if (retval .ne. nf_noerr) stop 2
      retval = nf_def_dim(ncid, "y", NY, y_dimid)
      if (retval .ne. nf_noerr) stop 2
      dimids(1) = y_dimid
      dimids(2) = x_dimid
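! (In the Fortran API the first dimension varies fastest, so
! dimids(1) = y_dimid makes y the fastest-varying dimension of the
! variable.)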
! Define the variable.
      retval = nf_def_var(ncid, "data", NF_INT, MAX_DIMS, dimids, varid)
      if (retval .ne. nf_noerr) stop 2
! With a classic model netCDF-4 file, enddef must be called.
      retval = nf_enddef(ncid)
      if (retval .ne. nf_noerr) stop 2
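! (Pure netCDF-4 files switch between define and data mode
! automatically; the classic model keeps the explicit define/data
! mode distinction, hence the nf_enddef call above.)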
! Determine what part of the variable will be written for this
! processor. It's a checkerboard decomposition.
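! Each rank owns one (NY/2) x (NX/2) quadrant. start/count follow the
! same dimension order as dimids, so index 1 is y and index 2 is x.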
      count(1) = NY / 2
      count(2) = NX / 2
      if (my_rank .eq. 0) then
         start(1) = 1
         start(2) = 1
      else if (my_rank .eq. 1) then
         start(1) = NY / 2 + 1
         start(2) = 1
      else if (my_rank .eq. 2) then
         start(1) = 1
         start(2) = NX / 2 + 1
      else if (my_rank .eq. 3) then
         start(1) = NY / 2 + 1
         start(2) = NX / 2 + 1
      endif
! Write this processor's data.
      retval = nf_put_vara_int(ncid, varid, start, count, data_out)
      if (retval .ne. nf_noerr) stop 2
! Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 2
! Reopen the file.
      retval = nf_open_par(FILE_NAME, nf_nowrite, MPI_COMM_WORLD,
     $     MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 2
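! Sanity-check the reopened file. A minimal sketch using the standard
! nf_inq and nf_inq_format inquiry functions; it puts the otherwise
! unused ndims/nvars/ngatts/unlimdimid/file_format declarations above
! to work. We expect 2 dims, 1 var, no global atts, no unlimited dim,
! and the netCDF-4 classic model format.
      retval = nf_inq(ncid, ndims, nvars, ngatts, unlimdimid)
      if (retval .ne. nf_noerr) stop 2
      if (ndims .ne. 2 .or. nvars .ne. 1 .or. ngatts .ne. 0
     $     .or. unlimdimid .ne. -1) stop 2
      retval = nf_inq_format(ncid, file_format)
      if (retval .ne. nf_noerr) stop 2
      if (file_format .ne. nf_format_netcdf4_classic) stop 2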
! Set collective access on this variable. This will cause all
! reads/writes to happen together on every processor. Fairly
! pointless in this context, but I want to at least call this
! function once in my testing.
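! (nf_var_par_access selects between nf_independent and nf_collective
! transfers for a single variable of a file opened for parallel
! access.)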
      retval = nf_var_par_access(ncid, varid, nf_collective)
      if (retval .ne. nf_noerr) stop 2
! Read this processor's data.
      retval = nf_get_vara_int(ncid, varid, start, count, data_in)
      if (retval .ne. nf_noerr) stop 2
! Check the data.
      do x = 1, NX / 2
         do y = 1, NY / 2
            if (data_in(y, x) .ne. my_rank) stop 3
         end do
      end do
! Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 2
      call MPI_Finalize(ierr)
      if (my_rank .eq. 0) print *, '*** SUCCESS!'
      end program ftst_parallel