/*********************************************************************
*
* Copyright (C) 2012, Northwestern University and Argonne National Laboratory
* See COPYRIGHT notice in top-level directory.
*
*********************************************************************/
/* $Id$ */
/* simple demonstration of pnetcdf
* text attribute on dataset
* write out rank into 1-d array collectively.
* The most basic way to do parallel i/o with pnetcdf */
/* This program creates a file, say named output.nc, with the following
contents, shown by running the ncmpidump command.
% mpiexec -n 4 pnetcdf-write-standard /orangefs/wkliao/output.nc
% ncmpidump /orangefs/wkliao/output.nc
netcdf output {
// file format: CDF-2 (large file)
dimensions:
d1 = 4 ;
variables:
int v1(d1) ;
int v2(d1) ;
// global attributes:
:string = "Hello World\n",
"" ;
data:
v1 = 0, 1, 2, 3 ;
v2 = 0, 1, 2, 3 ;
}
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <pnetcdf.h>
static void handle_error(int status, int lineno)
{
fprintf(stderr, "Error at line %d: %s\n", lineno, ncmpi_strerror(status));
MPI_Abort(MPI_COMM_WORLD, 1);
}
/* Each of the nprocs ranks writes its rank number into one element of two
 * 1-d integer variables, demonstrating the most basic collective PnetCDF
 * write path: define metadata, enddef, collective put, close. */
int main(int argc, char **argv)
{
    int err, ncid, nprocs, rank;
    int dim_d1, var_v1, var_v2;
    int ndims = 1;
    MPI_Offset start;
    MPI_Offset count = 1;
    char filename[256];
    char buf[13] = "Hello World\n";   /* 12 chars + NUL; all 13 bytes are written */
    int data;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* at most one command-line argument (the output file name) is accepted */
    if (argc > 2) {
        if (rank == 0) printf("Usage: %s filename\n", argv[0]);
        MPI_Finalize();
        exit(-1);
    }

    if (argc > 1)
        snprintf(filename, 256, "%s", argv[1]);
    else
        strcpy(filename, "testfile.nc");

    /* create a CDF-2 (64-bit offset) file, overwriting any existing one */
    err = ncmpi_create(MPI_COMM_WORLD, filename,
                       NC_CLOBBER|NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid);
    if (err != NC_NOERR) handle_error(err, __LINE__);

    /* one dimension of length nprocs, two 1-d int variables on it */
    err = ncmpi_def_dim(ncid, "d1", nprocs, &dim_d1);
    if (err != NC_NOERR) handle_error(err, __LINE__);
    err = ncmpi_def_var(ncid, "v1", NC_INT, ndims, &dim_d1, &var_v1);
    if (err != NC_NOERR) handle_error(err, __LINE__);
    err = ncmpi_def_var(ncid, "v2", NC_INT, ndims, &dim_d1, &var_v2);
    if (err != NC_NOERR) handle_error(err, __LINE__);

    /* global text attribute; length 13 deliberately includes the NUL,
     * which ncmpidump renders as a trailing "" */
    err = ncmpi_put_att_text(ncid, NC_GLOBAL, "string", 13, buf);
    if (err != NC_NOERR) handle_error(err, __LINE__);

    /* all processors defined the dimensions, attributes, and variables,
     * but here in ncmpi_enddef is the one place where metadata I/O
     * happens. Behind the scenes, rank 0 takes the information and writes
     * the netcdf header. All processes communicate to ensure they have
     * the same (cached) view of the dataset */
    err = ncmpi_enddef(ncid);
    if (err != NC_NOERR) handle_error(err, __LINE__);

    /* in this simple example every process writes its rank to two 1d
     * variables: element `rank` of each variable */
    start = rank;
    count = 1;
    data  = rank;
    err = ncmpi_put_vara_int_all(ncid, var_v1, &start, &count, &data);
    if (err != NC_NOERR) handle_error(err, __LINE__);
    err = ncmpi_put_vara_int_all(ncid, var_v2, &start, &count, &data);
    if (err != NC_NOERR) handle_error(err, __LINE__);

    err = ncmpi_close(ncid);
    if (err != NC_NOERR) handle_error(err, __LINE__);

    MPI_Finalize();
    return 0;
}