/*
* Copyright (C) by Argonne National Laboratory
* See COPYRIGHT in top-level directory
*/
#include <mpi.h>
#include <stdio.h>
#include <assert.h>
#include "mpitest.h"
#define ITER 100
#define MAX_SIZE 65536
/*
 * Test local completion guarantees of MPI_Raccumulate.
 *
 * Rank 1 issues Raccumulate(MPI_REPLACE) operations targeting rank 0's
 * window, waits on the request (which guarantees the local buffer may be
 * reused), overwrites the local buffer, and then reads the target back with
 * MPI_Get to verify the originally-accumulated values arrived intact.
 *
 * Two data paths are exercised:
 *   1. a single int (small data, typically sent as immediate/header data);
 *   2. MAX_SIZE ints (large data, typically sent directly from the user
 *      buffer, so premature reuse would corrupt the transfer).
 */
int main(int argc, char *argv[])
{
    int rank, nproc, i;
    int errors = 0;
    int *buf = NULL, *winbuf = NULL;
    MPI_Win window;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    /* Need at least an origin (rank 1) and a target (rank 0). */
    if (nproc < 2) {
        if (rank == 0)
            printf("Error: must be run with two or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Alloc_mem(MAX_SIZE * sizeof(int), MPI_INFO_NULL, &buf);
    MPI_Alloc_mem(MAX_SIZE * sizeof(int), MPI_INFO_NULL, &winbuf);
    MPI_Win_create(winbuf, MAX_SIZE * sizeof(int), sizeof(int), MPI_INFO_NULL,
                   MPI_COMM_WORLD, &window);
    MPI_Win_lock_all(0, window);

    /* Test Raccumulate local completion with small data.
     * Small data is always copied to header packet as immediate data. */
    if (rank == 1) {
        for (i = 0; i < ITER; i++) {
            MPI_Request acc_req;
            int val = -1;

            buf[0] = rank * i;
            MPI_Raccumulate(&buf[0], 1, MPI_INT, 0, 0, 1, MPI_INT, MPI_REPLACE, window, &acc_req);
            MPI_Wait(&acc_req, MPI_STATUS_IGNORE);

            /* reset local buffer to check local completion */
            buf[0] = 0;
            MPI_Win_flush(0, window);

            MPI_Get(&val, 1, MPI_INT, 0, 0, 1, MPI_INT, window);
            MPI_Win_flush(0, window);

            if (val != rank * i) {
                printf("%d - Got %d in small Raccumulate test, expected %d (%d * %d)\n", rank, val,
                       rank * i, rank, i);
                errors++;
            }
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* Test Raccumulate local completion with large data .
     * Large data is not suitable for 1-copy optimization, and always sent out
     * from user buffer. */
    if (rank == 1) {
        for (i = 0; i < ITER; i++) {
            MPI_Request acc_req;
            int val0 = -1, val1 = -1, val2 = -1;
            int j;

            /* initialize data */
            for (j = 0; j < MAX_SIZE; j++) {
                buf[j] = rank + j + i;
            }

            MPI_Raccumulate(buf, MAX_SIZE, MPI_INT, 0, 0, MAX_SIZE, MPI_INT, MPI_REPLACE, window,
                            &acc_req);
            MPI_Wait(&acc_req, MPI_STATUS_IGNORE);

            /* reset local buffer to check local completion */
            buf[0] = 0;
            buf[MAX_SIZE - 1] = 0;
            buf[MAX_SIZE / 2] = 0;
            MPI_Win_flush(0, window);

            /* get remote values which are modified in local buffer after wait */
            MPI_Get(&val0, 1, MPI_INT, 0, 0, 1, MPI_INT, window);
            MPI_Get(&val1, 1, MPI_INT, 0, MAX_SIZE - 1, 1, MPI_INT, window);
            MPI_Get(&val2, 1, MPI_INT, 0, MAX_SIZE / 2, 1, MPI_INT, window);
            MPI_Win_flush(0, window);

            if (val0 != rank + i) {
                printf("%d - Got %d in large Raccumulate test, expected %d\n", rank,
                       val0, rank + i);
                errors++;
            }
            if (val1 != rank + MAX_SIZE - 1 + i) {
                printf("%d - Got %d in large Raccumulate test, expected %d\n", rank,
                       val1, rank + MAX_SIZE - 1 + i);
                errors++;
            }
            if (val2 != rank + MAX_SIZE / 2 + i) {
                printf("%d - Got %d in large Raccumulate test, expected %d\n", rank,
                       val2, rank + MAX_SIZE / 2 + i);
                errors++;
            }
        }
    }

    MPI_Win_unlock_all(window);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Win_free(&window);

    if (buf)
        MPI_Free_mem(buf);
    if (winbuf)
        MPI_Free_mem(winbuf);

    /* MTest_Finalize aggregates 'errors' across ranks and prints the
     * summary.  BUGFIX: previously an 'all_errors' variable that was never
     * assigned (always 0) was passed to MTestReturnValue, so the process
     * exit status reported success even when errors were detected.  Return
     * the local error count instead. */
    MTest_Finalize(errors);
    return MTestReturnValue(errors);
}