/*********************************************************************
 *
 * Copyright (C) 2012, Northwestern University and Argonne National Laboratory
 * See COPYRIGHT notice in top-level directory.
 *
 *********************************************************************/
/* $Id$ */

/* simple demonstration of PnetCDF:
 * - a text attribute on the dataset
 * - every process writes its rank into two 1-d arrays collectively
 * This example demonstrates the non-blocking write interface */

/* This program creates a file, say named output.nc, with the following
 * contents, shown by running the ncmpidump command:
 *
 *    % mpiexec -n 4 pnetcdf-write-nb /orangefs/wkliao/output.nc
 *    % ncmpidump /orangefs/wkliao/output.nc
 *    netcdf output {
 *    // file format: CDF-2 (large file)
 *    dimensions:
 *            d1 = 4 ;
 *    variables:
 *            int v1(d1) ;
 *            int v2(d1) ;
 *    // global attributes:
 *                    :string = "Hello World\n",
 *            "" ;
 *    data:
 *     v1 = 0, 1, 2, 3 ;
 *     v2 = 0, 1, 2, 3 ;
 *    }
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strcpy() */
#include <assert.h>
#include <mpi.h>
#include <pnetcdf.h>
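
/* print the PnetCDF error string for a failed call and abort all ranks */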
static void handle_error(int status, int lineno)
{
    fprintf(stderr, "Error at line %d: %s\n", lineno, ncmpi_strerror(status));
    MPI_Abort(MPI_COMM_WORLD, 1);
}

int main(int argc, char **argv) {
    int ret, ncfile, nprocs, rank, dimid, varid1, varid2, ndims=1;
    char filename[256], buf[13] = "Hello World\n";
    int data1, data2, requests[2], statuses[2];
    MPI_Offset start, count=1;
    MPI_Info info;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (argc > 2) {
        if (rank == 0) printf("Usage: %s filename\n", argv[0]);
        MPI_Finalize();
        exit(-1);
    }
    if (argc > 1) snprintf(filename, 256, "%s", argv[1]);
    else          strcpy(filename, "testfile.nc");
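
    /* set a PnetCDF hint: "nc_var_align_size" controls the alignment of
     * the fixed-size variable section in the file; a value of 1 disables
     * alignment padding after the file header */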
    MPI_Info_create(&info);
    MPI_Info_set(info, "nc_var_align_size", "1");

    ret = ncmpi_create(MPI_COMM_WORLD, filename,
                       NC_CLOBBER|NC_64BIT_OFFSET, info, &ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
    MPI_Info_free(&info);
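
    /* the file is now in define mode: dimensions, variables, and
     * attributes can be created, but no variable data can be written */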
    ret = ncmpi_def_dim(ncfile, "d1", nprocs, &dimid);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    ret = ncmpi_def_var(ncfile, "v1", NC_INT, ndims, &dimid, &varid1);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
    ret = ncmpi_def_var(ncfile, "v2", NC_INT, ndims, &dimid, &varid2);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
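
    /* the attribute length of 13 includes the terminating NUL of buf,
     * which is why ncmpidump above shows a second, empty string */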
    ret = ncmpi_put_att_text(ncfile, NC_GLOBAL, "string", 13, buf);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    /* all processes have defined the dimensions, attributes, and
     * variables, but ncmpi_enddef is the one place where metadata I/O
     * happens. Behind the scenes, rank 0 takes the information and writes
     * the netcdf header. All processes communicate to ensure they have
     * the same (cached) view of the dataset */
    ret = ncmpi_enddef(ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
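
    /* the file has left define mode and entered data mode; variable data
     * can be written from this point on */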

    start = rank;
    count = 1;
    data1 = data2 = rank;

    /* in this simple example every process writes its rank to two 1-d
     * variables */

    /* we pass a basic MPI_INT type to this flexible-mode call, but could
     * have used any derived MPI datatype that describes the application's
     * data structures (see the sketch below) */

    /* furthermore, we use the non-blocking interface to essentially
     * schedule the two write operations. No I/O actually happens here,
     * which is why these routines do not need to be collective. */
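
    /* a minimal sketch (not used by this program; the name two_ints is
     * illustrative) of building such a derived datatype: a contiguous
     * pair of ints, which could then be passed as the buftype argument
     * together with a matching file access count:
     *
     *     MPI_Datatype two_ints;
     *     MPI_Type_contiguous(2, MPI_INT, &two_ints);
     *     MPI_Type_commit(&two_ints);
     *     ... use two_ints as buftype, then MPI_Type_free(&two_ints); ...
     */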
    ret = ncmpi_iput_vara(ncfile, varid1, &start, &count, &data1, count,
                          MPI_INT, &requests[0]);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    ret = ncmpi_iput_vara(ncfile, varid2, &start, &count, &data2, count,
                          MPI_INT, &requests[1]);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
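
    /* ncmpi_wait_all is collective: this is where the pending writes are
     * actually carried out, giving PnetCDF a chance to aggregate them
     * into fewer, larger I/O requests */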
    ret = ncmpi_wait_all(ncfile, 2, requests, statuses);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    /* check status of each non-blocking call */
    if (statuses[0] != NC_NOERR) handle_error(statuses[0], __LINE__);
    if (statuses[1] != NC_NOERR) handle_error(statuses[1], __LINE__);

    ret = ncmpi_close(ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    MPI_Finalize();
    return 0;
}