File: aggregation2.c

Package: openmpi 2.0.2-2

/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 *  (C) 2007 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */

/* Look for regressions in aggregator code.  A simpler access pattern than
 * aggregation1. */
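/* A typical invocation, assuming an mpiexec-style launcher and a writable
 * scratch path (both hypothetical; adjust for your installation):
 *
 *     mpiexec -n 4 ./aggregation2 /tmp/aggregation2.out
 *
 * Each rank writes and reads back its own BUFSIZE-int block, so the file
 * ends up nprocs * BUFSIZE * sizeof(int) bytes long. */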

#include <mpi.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#define BUFSIZE 512

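/* Translate an MPI error code into a human-readable string, report it, and
 * abort the whole job, since a failed I/O call leaves the test in an
 * undefined state. */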
static void handle_error(int errcode, const char *str)
{
    char msg[MPI_MAX_ERROR_STRING];
    int resultlen;
    MPI_Error_string(errcode, msg, &resultlen);
    fprintf(stderr, "%s: %s\n", str, msg);
    MPI_Abort(MPI_COMM_WORLD, 1);
}

int main(int argc, char ** argv)
{
    MPI_Info info = MPI_INFO_NULL;
    MPI_File fh;
    MPI_Offset off=0;
    MPI_Status status;
    int errcode;
    int i, rank, errs=0, toterrs, buffer[BUFSIZE], buf2[BUFSIZE];

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The test file name must be given on the command line. */
    if (argc < 2) {
        if (rank == 0)
            fprintf(stderr, "Usage: %s <filename>\n", argv[0]);
        MPI_Finalize();
        return 1;
    }

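    /* Ask ROMIO to funnel all collective writes through a single
     * aggregator: "romio_cb_write" forces collective buffering on, and
     * "cb_nodes" limits it to one I/O node, which is the code path this
     * test exercises. */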
    MPI_Info_create(&info);
    MPI_Info_set(info, "romio_cb_write", "enable");
    MPI_Info_set(info, "cb_nodes", "1");

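    /* Each rank fills its buffer with a rank-specific pattern and targets
     * its own non-overlapping region of the file. */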
    for (i=0; i<BUFSIZE; i++) {
        buffer[i] = 10000+rank;
    }
    off = rank*sizeof(buffer);

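    /* Phase 1: every rank collectively writes its block at its offset. */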
    errcode = MPI_File_open(MPI_COMM_WORLD, argv[1],
                            MPI_MODE_WRONLY|MPI_MODE_CREATE, info, &fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open");
    errcode = MPI_File_write_at_all(fh, off, buffer, BUFSIZE,
                                    MPI_INT, &status);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_write_at_all");
    errcode = MPI_File_close(&fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_close");

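    /* Phase 2: reopen the file read-only and collectively read the data
     * back into a second buffer. */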
    errcode = MPI_File_open(MPI_COMM_WORLD, argv[1],
                            MPI_MODE_RDONLY, info, &fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open");
    errcode = MPI_File_read_at_all(fh, off, buf2, BUFSIZE,
                                   MPI_INT, &status);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_read_at_all");
    errcode = MPI_File_close(&fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_close");

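    /* Verify the readback against the expected pattern, then sum the
     * per-rank error counts so rank 0 can report a single result. */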
    for (i=0; i<BUFSIZE; i++) {
        if (buf2[i] != 10000+rank)
            errs++;
    }
    MPI_Allreduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (toterrs > 0) {
            fprintf(stderr, "Found %d errors\n", toterrs);
        }
        else {
            fprintf(stdout, " No Errors\n");
        }
    }
    MPI_Info_free(&info);
    MPI_Finalize();

    return 0;
}