File: ad_gpfs_flush.c

/* ---------------------------------------------------------------- */
/* (C)Copyright IBM Corp.  2007, 2008                               */
/* ---------------------------------------------------------------- */
/**
 * \file ad_gpfs_flush.c
 * \brief Scalable flush for GPFS
 */

/* -*- Mode: C; c-basic-offset:4 ; -*- */
/* 
 *
 *   Copyright (C) 1997 University of Chicago. 
 *   See COPYRIGHT notice in top-level directory.
 */

#include "ad_gpfs.h"

/* for fsync(), errno, and strerror() */
#include <errno.h>
#include <string.h>
#include <unistd.h>

void ADIOI_GPFS_Flush(ADIO_File fd, int *error_code)
{
    int err=0;
    static char myname[] = "ADIOI_GPFS_FLUSH";

    int rank;

    MPI_Comm_rank(fd->comm, &rank);

    /* The old logic about which process is an fsync aggregator and which is
     * not broke down when deferred open was enabled.  Instead, make this look
     * more like ad_pvfs2_flush.  If one day the I/O aggregators have something
     * they need to flush, we can consult the 'fd->hints->ranklist[]' array.
     * For now, a flush from one process should suffice. */

    /* Ensure all other processes are done writing.  On many platforms
     * MPI_Reduce is fastest because it has the lightest constraints; on Blue
     * Gene, MPI_Barrier is optimized. */
    MPI_Barrier(fd->comm);

    if (rank == fd->hints->ranklist[0]) {
        err = fsync(fd->fd_sys);
        DBG_FPRINTF(stderr, "aggregation: fsync %s, err=%#X, errno=%#X\n",
                    fd->filename, err, errno);
        /* we want errno, not the return code, if fsync failed */
        if (err == -1)
            err = errno;
        else
            err = 0;
    }
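    /* broadcast the fsync result so that every process returns the same
     * error code: MPI_File_sync is collective, and all ranks must agree
     * on success or failure */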
    MPI_Bcast(&err, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
    DBGV_FPRINTF(stderr, "aggregation result: fsync %s, errno=%#X\n",
                 fd->filename, err);

    if (err) {
        /* if it's non-zero, it must be an errno */
        errno = err;
        err = -1;
    }

    /* --BEGIN ERROR HANDLING-- */
    if (err == -1) {
        *error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                           myname, __LINE__, MPI_ERR_IO,
                                           "**io", "**io %s", strerror(errno));
        DBGT_FPRINTF(stderr, "fsync %s, err=%#X, errno=%#X\n",
                     fd->filename, err, errno);
        return;
    }
    /* --END ERROR HANDLING-- */

    *error_code = MPI_SUCCESS;
}
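
For context, a minimal sketch of the user-visible call path that reaches this
routine: an MPI program opens a file through ROMIO's GPFS driver and calls
MPI_File_sync, which dispatches to ADIOI_GPFS_Flush on every process in the
communicator. The file path and the per-rank write pattern below are
hypothetical, chosen only to illustrate the collective flush; error handling
is abbreviated.

/* flush_demo.c -- hypothetical usage sketch; the path is illustrative only */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_File fh;
    int rank, rc;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* open collectively on a (hypothetical) GPFS-backed path */
    rc = MPI_File_open(MPI_COMM_WORLD, "/gpfs/scratch/demo.dat",
                       MPI_MODE_CREATE | MPI_MODE_WRONLY,
                       MPI_INFO_NULL, &fh);
    if (rc != MPI_SUCCESS) {
        fprintf(stderr, "rank %d: open failed\n", rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* each rank writes its own value at a disjoint offset */
    MPI_File_write_at(fh, (MPI_Offset)rank * (MPI_Offset)sizeof(int),
                      &rank, 1, MPI_INT, MPI_STATUS_IGNORE);

    /* collective flush: with the GPFS driver this ends up in
     * ADIOI_GPFS_Flush, where ranklist[0] performs the single fsync
     * and broadcasts the result to all ranks */
    MPI_File_sync(fh);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}

Note the design choice the driver makes: rather than having every process
fsync (redundant on a coherent parallel file system like GPFS), one process
syncs after a barrier guarantees all writes have been issued, and the errno
is broadcast so the collective returns a consistent result everywhere.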