! This file created from f77/io/setviewcurf.f with f77tof90
!
! Copyright (C) by Argonne National Laboratory
! See COPYRIGHT in top-level directory
!
program main
    use mpi
    implicit none
    integer (kind=MPI_OFFSET_KIND) offset
    integer errs, ierr, size, rank
    integer fh, comm, status(MPI_STATUS_SIZE)
    integer buf(1024)

    errs = 0
    call MTest_Init( ierr )
    ! This test first writes the file using a combination of collective
    ! and ordered writes: a header, then one integer per process in rank
    ! order.  It then reopens the file in sequential mode, reads the
    ! header, and uses set view with the current displacement to skip
    ! the header before reading the per-process values back.
    comm = MPI_COMM_WORLD
    call MPI_File_open( comm, "test.ord", MPI_MODE_WRONLY + &
         & MPI_MODE_CREATE, MPI_INFO_NULL, fh, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Open(1)", ierr )
    endif
    call MPI_Comm_size( comm, size, ierr )
    call MPI_Comm_rank( comm, rank, ierr )
    if (size .gt. 1024) then
        if (rank .eq. 0) then
            print *, &
                 & "This program must be run with no more than 1024 processes"
            call MPI_Abort( MPI_COMM_WORLD, 1, ierr )
        endif
    endif
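    ! Write the header: with the default file view every process's
    ! individual file pointer starts at offset 0, so all processes
    ! write the same value (the communicator size) to the first int.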
    buf(1) = size
    call MPI_File_write_all( fh, buf, 1, MPI_INTEGER, status, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Write_all", ierr )
    endif
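    ! The individual file pointer is now just past the header.
    ! Get_position reports it in units of the current view's etype
    ! (bytes for the default view); move the shared file pointer there
    ! so the ordered write below starts after the header.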
    call MPI_File_get_position( fh, offset, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Get_position", ierr )
    endif
    call MPI_File_seek_shared( fh, offset, MPI_SEEK_SET, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Seek_shared", ierr )
    endif
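    ! Each process appends its own rank; write_ordered serializes the
    ! writes in rank order through the shared file pointer, so rank r's
    ! value lands r ints past the header.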
    buf(1) = rank
    call MPI_File_write_ordered( fh, buf, 1, MPI_INTEGER, status, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Write_ordered", ierr )
    endif
    call MPI_File_close( fh, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Close(1)", ierr )
    endif
    ! Reopen the file for sequential access; MODE_DELETE_ON_CLOSE
    ! removes the test file when it is closed below
    call MPI_File_open( comm, "test.ord", MPI_MODE_RDONLY + &
         & MPI_MODE_SEQUENTIAL + MPI_MODE_DELETE_ON_CLOSE, &
         & MPI_INFO_NULL, fh, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Open(Read)", ierr )
    endif
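    ! Only rank 0 reads and verifies the header, using the shared file
    ! pointer (files opened with MODE_SEQUENTIAL are accessed through
    ! the shared pointer); the other processes wait at the barrier below.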
    if (rank .eq. 0) then
        call MPI_File_read_shared( fh, buf, 1, MPI_INTEGER, status, ierr )
        if (ierr .ne. MPI_SUCCESS) then
            errs = errs + 1
            call MTestPrintErrorMsg( "Read_shared", ierr )
        endif
        if (buf(1) .ne. size) then
            errs = errs + 1
            print *, "Unexpected value for the header = ", buf(1), &
                 & ", should be ", size
        endif
    endif
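    ! The barrier orders rank 0's header read before the set_view: with
    ! MODE_SEQUENTIAL the displacement must be given as
    ! MPI_DISPLACEMENT_CURRENT, which takes the current position of the
    ! shared file pointer (now just past the header) as the displacement.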
    call MPI_Barrier( comm, ierr )
    ! All processes must provide the same file view for MODE_SEQUENTIAL
    call MPI_File_set_view( fh, MPI_DISPLACEMENT_CURRENT, MPI_INTEGER, &
         & MPI_INTEGER, "native", MPI_INFO_NULL, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Set_view", ierr )
    endif
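    ! Read back one int per process in rank order; under the new view
    ! rank r's ordered read returns the value it wrote, i.e. r itself.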
    buf(1) = -1
    call MPI_File_read_ordered( fh, buf, 1, MPI_INTEGER, status, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Read_ordered", ierr )
    endif
    if (buf(1) .ne. rank) then
        errs = errs + 1
        print *, rank, ": buf(1) = ", buf(1)
    endif
    call MPI_File_close( fh, ierr )
    if (ierr .ne. MPI_SUCCESS) then
        errs = errs + 1
        call MTestPrintErrorMsg( "Close(2)", ierr )
    endif

    call MTest_Finalize( errs )
end