File: alltoallv.c

Package: mpich 4.3.0+really4.2.1-1
/*
 * Copyright (C) by Argonne National Laboratory
 *     See COPYRIGHT in top-level directory
 */

#include "mpi.h"
#include "mpitest.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
  This program tests MPI_Alltoallv by having processor i send a different
  amount of data to each processor.

  Because alltoallv takes separate send and receive datatypes, there should
  also be tests that rearrange data on the fly; those are not done yet.

  The first test sends i items to processor i from all processors.

  Currently, the test uses only MPI_INT; this is adequate for testing
  systems that use point-to-point operations.
 */

int main(int argc, char **argv)
{

    MPI_Comm comm;
    int *sbuf, *rbuf;
    int rank, size;
    int *sendcounts, *recvcounts, *rdispls, *sdispls;
    int i, j, *p, errs;

    MTest_Init(&argc, &argv);
    errs = 0;

    while (MTestGetIntracommGeneral(&comm, 2, 1)) {
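        /* MTestGetIntracommGeneral (from the MPICH test framework) yields a
         * sequence of intracommunicators with at least 2 ranks, returning 0
         * once the sequence is exhausted; MPI_COMM_NULL entries are skipped. */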
        if (comm == MPI_COMM_NULL)
            continue;

        /* Create the buffer */
        MPI_Comm_size(comm, &size);
        MPI_Comm_rank(comm, &rank);
        sbuf = (int *) malloc(size * size * sizeof(int));
        rbuf = (int *) malloc(size * size * sizeof(int));
        if (!sbuf || !rbuf) {
            fprintf(stderr, "Could not allocated buffers!\n");
            MPI_Abort(comm, 1);
        }

        /* Load up the buffers: tag each send element with its owning
         * rank (100*rank) plus its index */
        for (i = 0; i < size * size; i++) {
            sbuf[i] = i + 100 * rank;
            rbuf[i] = -i;
        }

        /* Create and load the arguments to alltoallv */
        sendcounts = (int *) malloc(size * sizeof(int));
        recvcounts = (int *) malloc(size * sizeof(int));
        rdispls = (int *) malloc(size * sizeof(int));
        sdispls = (int *) malloc(size * sizeof(int));
        if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
            fprintf(stderr, "Could not allocate arg items!\n");
            MPI_Abort(comm, 1);
        }
        for (i = 0; i < size; i++) {
            sendcounts[i] = i;
            recvcounts[i] = rank;
            rdispls[i] = i * rank;
            sdispls[i] = (i * (i + 1)) / 2;
        }
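        /* The exchange this sets up: every rank sends i ints to rank i,
         * packed back to back in sbuf (sdispls[i] = i*(i+1)/2, the
         * triangular numbers), and receives `rank` ints from each peer,
         * placed at rdispls[i] = i*rank. */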
        MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, comm);

        /* Check rbuf: the j-th int received from rank i must be
         * 100*i + rank*(rank+1)/2 + j, i.e. rank i's sbuf value at
         * offset sdispls[rank] + j */
        for (i = 0; i < size; i++) {
            p = rbuf + rdispls[i];
            for (j = 0; j < rank; j++) {
                if (p[j] != i * 100 + (rank * (rank + 1)) / 2 + j) {
                    fprintf(stderr, "[%d] got %d expected %d for %dth\n",
                            rank, p[j], (i * (i + 1)) / 2 + j, j);
                    errs++;
                }
            }
        }

        free(sdispls);
        free(sendcounts);
        free(sbuf);
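        /* sendcounts, sdispls, and sbuf are no longer needed; recvcounts
         * and rdispls are reused by the MPI_IN_PLACE test below. */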

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
        /* check MPI_IN_PLACE, added in MPI-2.2 */
        free(rbuf);
        rbuf = (int *) malloc(size * (2 * size) * sizeof(int));
        if (!rbuf) {
            fprintf(stderr, "Could not reallocate rbuf!\n");
            MPI_Abort(comm, 1);
        }

        /* Set up new counts/displacements and preload each block of rbuf
         * with the data destined for the corresponding rank */
        for (i = 0; i < size; i++) {
            recvcounts[i] = i + rank;
            rdispls[i] = i * (2 * size);
        }
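        /* Each block gets a full stride of 2*size ints; the largest count
         * is (size-1) + (size-1) = 2*size - 2, so blocks cannot overlap. */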
        memset(rbuf, -1, size * (2 * size) * sizeof(int));
        for (i = 0; i < size; i++) {
            p = rbuf + rdispls[i];
            for (j = 0; j < recvcounts[i]; ++j) {
                p[j] = 100 * rank + 10 * i + j;
            }
        }
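        /* With MPI_IN_PLACE as the send buffer, sendcounts, sdispls, and
         * sendtype are ignored: the data sent to rank i is taken from block
         * i of rbuf and overwritten by the data received from rank i.  This
         * requires a symmetric count matrix, which recvcounts[i] = i + rank
         * satisfies. */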
        MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, comm);
        /* Check rbuf: block i should now hold what rank i preloaded into
         * its own block `rank`, namely 100*i + 10*rank + j */
        for (i = 0; i < size; i++) {
            p = rbuf + rdispls[i];
            for (j = 0; j < recvcounts[i]; j++) {
                int expected = 100 * i + 10 * rank + j;
                if (p[j] != expected) {
                    fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
                            rank, p[j], expected, i, j);
                    ++errs;
                }
            }
        }
#endif

        free(rdispls);
        free(recvcounts);
        free(rbuf);
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}