File: testlist.gpu

# The dtp tests iterate over every entry in -typelist and every value in -counts;
# each combination is repeated -repeat times, with the seed, test sizes, and
# memory types selected accordingly.
# Set MPITEST_VERBOSE=1 to see the list of tests being run.
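#
# Format note (per the usual MPICH testlist conventions): each line is
#   <test name> <nprocs> [arg=<argument passed to the test>]...
#                        [env=<NAME=value exported for the run>]...
#                        [timeLimit=<seconds allowed before the run is killed>]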

# sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-memtype=random arg=-repeat=2 timeLimit=600
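#
# The 16 runs below sweep every send/receive pairing of the four buffer
# placements: device, host, reg_host (presumably registered/page-locked host
# memory), and shared (presumably device-shared/managed memory). The
# commented-out line above would instead pick placements at random each
# iteration via -memtype=random.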
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=device arg=-recvmem=device
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=device arg=-recvmem=host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=device arg=-recvmem=reg_host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=device arg=-recvmem=shared
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=host arg=-recvmem=device
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=host arg=-recvmem=host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=host arg=-recvmem=reg_host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=host arg=-recvmem=shared
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=reg_host arg=-recvmem=device
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=reg_host arg=-recvmem=host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=reg_host arg=-recvmem=reg_host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=reg_host arg=-recvmem=shared
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=shared arg=-recvmem=device
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=shared arg=-recvmem=host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=shared arg=-recvmem=reg_host
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8,100 arg=-sendmem=shared arg=-recvmem=shared
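#
# Device-to-device run that exercises the rendezvous pipeline path: lowering
# MPIR_CVAR_CH4_OFI_EAGER_THRESHOLD to 100000 bytes pushes the larger counts
# past the eager cutoff, and MPIR_CVAR_CH4_OFI_RNDV_PROTOCOL=pipeline selects
# the chunked GPU pipeline protocol.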
sendrecv1 2 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=200 arg=-testsizes=8 arg=-sendmem=device arg=-recvmem=device env=MPIR_CVAR_CH4_OFI_EAGER_THRESHOLD=100000 env=MPIR_CVAR_CH4_OFI_RNDV_PROTOCOL=pipeline timeLimit=600
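#
# Dedicated pipeline test: 128 KiB (131072-byte) eager threshold, with the
# pipeline presumably split into 4 in-flight chunks of 1 MiB (1048576 bytes) each.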
pipeline 2 env=MPIR_CVAR_CH4_OFI_EAGER_THRESHOLD=131072 env=MPIR_CVAR_CH4_OFI_RNDV_PROTOCOL=pipeline env=MPIR_CVAR_CH4_OFI_PIPELINE_NUM_CHUNKS=4 env=MPIR_CVAR_CH4_OFI_PIPELINE_CHUNK_SZ=1048576