/*
Copyright 2019, UCAR/Unidata
See COPYRIGHT file for copying and redistribution conditions.
This program tests netcdf-4 parallel I/O while writing with the zlib
filter. This works for HDF5-1.10.2 and later; in that case
HDF5_SUPPORTS_PAR_FILTERS will be defined during configure.
Ed Hartnett, 12/19/2019
*/
/* Defining USE_MPE causes the MPE trace library to be used (and you
* must also relink with -llmpe -lmpe). This causes clog2 output to be
* written, which can be converted to slog2 (by the program
* clog2TOslog2) and then used in the analysis program jumpshot. */
/*#define USE_MPE 1*/
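/* A minimal sketch of how an MPE-enabled build might be driven, assuming an
* MPICH-style compiler wrapper; exact flags, include/library paths, and the
* name of the clog2 file produced by the run may differ on your system:
*
*    mpicc -DUSE_MPE tst_parallel_zlib.c -o tst_parallel_zlib -lnetcdf -llmpe -lmpe
*    mpiexec -n 4 ./tst_parallel_zlib
*    clog2TOslog2 <clog2-file-from-the-run>   (view the resulting .slog2 in jumpshot)
*/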
#include <nc_tests.h>
#include "err_macros.h"
#include <mpi.h>
#ifdef USE_MPE
#include <mpe.h>
#include <unistd.h> /* for sleep(), called below when MPE logging is enabled */
#endif /* USE_MPE */
#define FILE "tst_parallel_zlib.nc"
#define NDIMS 3
#define DIMSIZE 24
#define QTR_DATA (DIMSIZE * DIMSIZE / 4)
#define NUM_PROC 4
#define NUM_SLABS 10
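/* The variable written below is DIMSIZE x DIMSIZE x NUM_SLABS (24 x 24 x 10).
* When run on NUM_PROC (4) processes, each rank writes DIMSIZE/NUM_PROC rows
* of one 24 x 24 record per call, i.e. QTR_DATA (144) ints at a time. */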
int
main(int argc, char **argv)
{
/* MPI stuff. */
int mpi_namelen;
char mpi_name[MPI_MAX_PROCESSOR_NAME];
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
/* Netcdf-4 stuff. */
int ncid, v1id, dimids[NDIMS];
size_t start[NDIMS], count[NDIMS];
int i, res;
int slab_data[DIMSIZE * DIMSIZE / 4]; /* one slab */
char file_name[NC_MAX_NAME + 1];
#ifdef USE_MPE
int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
#endif /* USE_MPE */
/* Initialize MPI. */
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Get_processor_name(mpi_name, &mpi_namelen);
/*printf("mpi_name: %s size: %d rank: %d\n", mpi_name,
mpi_size, mpi_rank);*/
#ifdef USE_MPE
MPE_Init_log();
s_init = MPE_Log_get_event_number();
e_init = MPE_Log_get_event_number();
s_define = MPE_Log_get_event_number();
e_define = MPE_Log_get_event_number();
s_write = MPE_Log_get_event_number();
e_write = MPE_Log_get_event_number();
s_close = MPE_Log_get_event_number();
e_close = MPE_Log_get_event_number();
MPE_Describe_state(s_init, e_init, "Init", "red");
MPE_Describe_state(s_define, e_define, "Define", "yellow");
MPE_Describe_state(s_write, e_write, "Write", "green");
MPE_Describe_state(s_close, e_close, "Close", "purple");
MPE_Start_log();
MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */
if (mpi_rank == 0)
{
printf("\n*** Testing parallel writes with zlib.\n");
printf("*** testing simple write with zlib...");
}
/* Create phony data. We're going to write a 24x24 array of ints,
in 4 sets of 144. */
for (i = 0; i < DIMSIZE * DIMSIZE / 4; i++)
slab_data[i] = mpi_rank;
#ifdef USE_MPE
MPE_Log_event(e_init, 0, "end init");
MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */
/* Create a parallel netcdf-4 file. */
/*nc_set_log_level(3);*/
/* sprintf(file_name, "%s/%s", TEMP_LARGE, FILE); */
snprintf(file_name, sizeof(file_name), "%s", FILE);
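/* nc_create_par() takes the file name, a creation mode (NC_NETCDF4 here,
* since filters are only available in the HDF5-based format), the MPI
* communicator and info object, and returns the new file's ncid. */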
if ((res = nc_create_par(file_name, NC_NETCDF4, comm, info, &ncid))) ERR;
/* Create three dimensions. */
if (nc_def_dim(ncid, "d1", DIMSIZE, dimids)) ERR;
if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;
if (nc_def_dim(ncid, "d3", NUM_SLABS, &dimids[2])) ERR;
/* Create one var. */
if ((res = nc_def_var(ncid, "v1", NC_INT, NDIMS, dimids, &v1id))) ERR;
/* Setting deflate will only work for HDF5-1.10.2 and later
* versions. */
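/* The arguments to nc_def_var_deflate() are the ncid, the varid (0 is v1id,
* the first and only variable), shuffle off, deflate on, and deflate level 1.
* On builds where HDF5 lacks parallel filter support the call is expected to
* fail with NC_EINVAL, which the #ifdef below checks for. */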
res = nc_def_var_deflate(ncid, 0, 0, 1, 1);
#ifdef HDF5_SUPPORTS_PAR_FILTERS
if (res) ERR;
#else
if (res != NC_EINVAL) ERR;
#endif
/* Write metadata to file. */
if ((res = nc_enddef(ncid))) ERR;
#ifdef USE_MPE
MPE_Log_event(e_define, 0, "end define file");
if (mpi_rank)
sleep(mpi_rank);
#endif /* USE_MPE */
/* Set up slab for this process. */
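/* Each rank starts at its own block of rows (start[0]) and writes
* DIMSIZE/mpi_size rows of all DIMSIZE columns for a single record
* (count[2] = 1); start[2] advances over the NUM_SLABS records in the
* write loop below. */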
start[0] = mpi_rank * DIMSIZE/mpi_size;
start[1] = 0;
count[0] = DIMSIZE/mpi_size;
count[1] = DIMSIZE;
count[2] = 1;
/*printf("mpi_rank=%d start[0]=%d start[1]=%d count[0]=%d count[1]=%d\n",
mpi_rank, start[0], start[1], count[0], count[1]);*/
/* Not necessary, but harmless. */
if (nc_var_par_access(ncid, v1id, NC_COLLECTIVE)) ERR;
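/* Note: parallel HDF5 generally requires collective I/O when writing
* compressed (filtered) data, and netcdf-c may already have switched this
* deflated variable to collective access on its own, which is why the
* explicit call above is harmless rather than strictly required. */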
for (start[2] = 0; start[2] < NUM_SLABS; start[2]++)
{
#ifdef USE_MPE
MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */
/*nc_set_log_level(3);*/
/* Write slabs of phony data. */
if (nc_put_vara_int(ncid, v1id, start, count, slab_data)) ERR;
#ifdef USE_MPE
MPE_Log_event(e_write, 0, "end write slab");
#endif /* USE_MPE */
}
#ifdef USE_MPE
MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */
/* Close the netcdf file. */
if ((res = nc_close(ncid))) ERR;
#ifdef USE_MPE
MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */
/* Shut down MPI. */
MPI_Finalize();
if (mpi_rank == 0)
{
SUMMARIZE_ERR;
FINAL_RESULTS;
}
return 0;
}