File: ad_pvfs_flush.c

package info (click to toggle)
openmpi 4.1.4-3
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 127,592 kB
  • sloc: ansic: 690,998; makefile: 43,047; f90: 19,220; sh: 7,182; java: 6,360; perl: 3,590; cpp: 2,227; python: 1,350; lex: 989; fortran: 61; tcl: 12
file content (36 lines) | stat: -rw-r--r-- 1,174 bytes parent folder | download | duplicates (4)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/* 
 *
 *   Copyright (C) 1997 University of Chicago. 
 *   See COPYRIGHT notice in top-level directory.
 */

#include "ad_pvfs.h"

void ADIOI_PVFS_Flush(ADIO_File fd, int *error_code)
{
    int err, rank, dummy=0, dummy_in=0;
    static char myname[] = "ADIOI_PVFS_FLUSH";

    /* a collective routine: because we do not cache data in PVFS1, one process
     * can initiate the fsync operation and broadcast the result to the others.
     * One catch: MPI_File_sync has special meaning with respect to file system
     * consistency.  Ensure no clients have outstanding write operations.
     */

    MPI_Comm_rank(fd->comm, &rank);
    MPI_Reduce(&dummy_in, &dummy, 1, MPI_INT, MPI_SUM, 
		    fd->hints->ranklist[0], fd->comm);
    if (rank == fd->hints->ranklist[0]) {
	    err = pvfs_fsync(fd->fd_sys);
    }
    MPI_Bcast(&err, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);

    if (err == -1) {
	*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
					   myname, __LINE__, MPI_ERR_IO,
					   "**io",
					   "**io %s", strerror(errno));
    }
    else *error_code = MPI_SUCCESS;
}