/*
* Copyright (C) by Argonne National Laboratory
* See COPYRIGHT in top-level directory
*/
/*
 * Run concurrent sends to different target processes. This stresses an
 * implementation that permits concurrent sends to different targets to
 * proceed in parallel.
 *
 * With verbose output turned on, simple timing data for each message size
 * is printed.
 */
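/*
 * Example invocation, as a sketch: the binary name here is hypothetical,
 * and the MPITEST_VERBOSE environment variable assumes the MPICH test
 * harness's verbosity control. With 4 processes, rank 0 runs 3 concurrent
 * sender threads, one per receiving rank:
 *
 *     MPITEST_VERBOSE=1 mpiexec -n 4 ./multisend
 */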
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include "mpitest.h"
#include "mpithreadtest.h"

/* Largest message size (in ints) and number of timed sends per size */
#define MAX_CNT 660000
#define MAX_LOOP 200

/* Number of sender threads; set by rank 0 once the process count is known */
static int nthreads = -1;

/* This is the main test routine: each sender thread streams MAX_LOOP
 * messages of each size to its own target rank */
MTEST_THREAD_RETURN_TYPE run_test_send(void *arg);
MTEST_THREAD_RETURN_TYPE run_test_send(void *arg)
{
    int cnt, j, *buf, tag;
    int thread_num = (int) (long) arg;
    double t;

    /* Double the message size each iteration, using a distinct tag per size */
    for (cnt = 1, tag = 1; cnt < MAX_CNT; cnt = 2 * cnt, tag++) {
        buf = (int *) malloc(cnt * sizeof(int));
        MTEST_VG_MEM_INIT(buf, cnt * sizeof(int));

        /* Wait for all sender threads to be ready before timing */
        MTest_thread_barrier(nthreads);

        t = MPI_Wtime();
        for (j = 0; j < MAX_LOOP; j++)
            MPI_Send(buf, cnt, MPI_INT, thread_num, tag, MPI_COMM_WORLD);
        t = MPI_Wtime() - t;
        free(buf);

        /* Let a single thread report the average time per send */
        if (thread_num == 1)
            MTestPrintfMsg(1, "buf size %d: time %f\n", cnt, t / MAX_LOOP);
    }
    return (MTEST_THREAD_RETURN_TYPE) NULL;
}

/* Receiver side: each rank other than 0 sinks the messages sent by its
 * dedicated thread on rank 0, matching the sender's size/tag sequence */
void run_test_recv(void);
void run_test_recv(void)
{
    int cnt, j, *buf, tag;
    MPI_Status status;
    double t;

    for (cnt = 1, tag = 1; cnt < MAX_CNT; cnt = 2 * cnt, tag++) {
        buf = (int *) malloc(cnt * sizeof(int));
        MTEST_VG_MEM_INIT(buf, cnt * sizeof(int));
        /* Elapsed time is measured but not reported on the receive side */
        t = MPI_Wtime();
        for (j = 0; j < MAX_LOOP; j++)
            MPI_Recv(buf, cnt, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
        t = MPI_Wtime() - t;
        free(buf);
    }
}

int main(int argc, char **argv)
{
    int i, pmode, nprocs, rank;
    int errs = 0, err;

    MTest_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &pmode);
    if (pmode != MPI_THREAD_MULTIPLE) {
        fprintf(stderr, "MPI_THREAD_MULTIPLE is not supported by this MPI implementation\n");
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (nprocs < 2) {
        fprintf(stderr, "Need at least two processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Make sure every receiver is initialized before rank 0 starts sending */
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) {
        /* Rank 0 drives the test: one sender thread per receiving rank */
        nthreads = nprocs - 1;
        err = MTest_thread_barrier_init();
        if (err) {
            fprintf(stderr, "Could not create thread barrier\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        for (i = 1; i < nprocs; i++)
            MTest_Start_thread(run_test_send, (void *) (long) i);
        MTest_Join_threads();
        MTest_thread_barrier_free();
    } else {
        /* Every other rank receives from its dedicated thread on rank 0 */
        run_test_recv();
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}