File: testlist.gpu

# memtype=all iterates over all memtypes for even-ranked processes and sets odd-ranked processes to device memory

allred 4 arg=-counts=10,100 arg=-memtype=all
allred 7 arg=-counts=10 arg=-memtype=all
allred2 4 arg=-memtype=all
allred2 12 arg=-memtype=all
reduce 5 arg=-memtype=all
reduce 7 arg=-memtype=all
op_coll 4 arg=-memtype=all
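
# Each line above follows the runtests convention "<prog> <nprocs> [key=value ...]":
# arg= values are appended to the test binary's command line and env= values are
# exported into its environment. As a rough sketch (the exact launcher invocation
# depends on how the harness is configured), the first allred line runs roughly:
#   mpiexec -n 4 ./allred -counts=10,100 -memtype=all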

# ZE_AFFINITY_MASK tests
allred2 8 arg=-memtype=all env=MTEST_GPU_VISIBILITY_AFFINITY=CONTIGUOUS_DEVICE
allred2 8 arg=-memtype=all env=MTEST_GPU_VISIBILITY_AFFINITY=CONTIGUOUS_SUBDEVICE
allred2 8 arg=-memtype=all env=MTEST_GPU_VISIBILITY_AFFINITY=CONTIGUOUS_SINGLE_SUBDEVICE
allred2 8 arg=-memtype=all env=MTEST_GPU_VISIBILITY_AFFINITY=CONTIGUOUS_MULTI_SUBDEVICE
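
# MTEST_GPU_VISIBILITY_AFFINITY tells the harness how to partition GPUs among
# ranks, which it does by setting ZE_AFFINITY_MASK per process. For reference,
# a Level Zero affinity mask lists the visible devices or subdevices
# (illustrative values only):
#   ZE_AFFINITY_MASK=0,1      exposes devices 0 and 1
#   ZE_AFFINITY_MASK=0.0,0.1  exposes subdevices 0 and 1 of device 0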

# The dtp tests iterate over every typelist entry and every count; each
# combination is repeated [repeat] times, with the seed, testsize, and memtypes
# selected accordingly.
# Set MPITEST_VERBOSE=1 to print the list of tests being run.
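# For example, with the runtests driver (script name and -tests flag assumed):
#   MPITEST_VERBOSE=1 ./runtests -tests=testlist.gpu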

# bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-memtype=random timeLimit=600
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=device arg=-oddmemtype=device
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=device arg=-oddmemtype=host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=device arg=-oddmemtype=reg_host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=device arg=-oddmemtype=shared
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=host arg=-oddmemtype=device
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=host arg=-oddmemtype=host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=host arg=-oddmemtype=reg_host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=host arg=-oddmemtype=shared
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=reg_host arg=-oddmemtype=device
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=reg_host arg=-oddmemtype=host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=reg_host arg=-oddmemtype=reg_host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=reg_host arg=-oddmemtype=shared
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=shared arg=-oddmemtype=device
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=shared arg=-oddmemtype=host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=shared arg=-oddmemtype=reg_host
bcast 4 arg=-typelist=MPI_INT,MPI_INT:4+MPI_DOUBLE:8 arg=-counts=1,17,50,100,512,65530 arg=-seed=100 arg=-testsizes=2,50 arg=-evenmemtype=shared arg=-oddmemtype=shared
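
# A note on the typelist syntax above: a bare name (MPI_INT) selects that basic
# type, while MPI_INT:4+MPI_DOUBLE:8 requests a combined datatype built from a
# block of 4 ints plus a block of 8 doubles (our reading of the dtp typelist
# notation; the framework derives the concrete MPI datatypes from it).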

# IPC-read tests for bcast and alltoall
bcast_gpu 6 arg=-memtype=all env=MPIR_CVAR_BCAST_COMPOSITION=1 env=MPIR_CVAR_CH4_IPC_GPU_P2P_THRESHOLD=1 env=MPIR_CVAR_BCAST_POSIX_INTRA_ALGORITHM=ipc_read env=MPIR_CVAR_BCAST_IPC_READ_MSG_SIZE_THRESHOLD=1024 env=MTEST_GPU_VISIBILITY_AFFINITY=CONTIGUOUS_DEVICE
alltoall_gpu 6 arg=-memtype=all env=MPIR_CVAR_ALLTOALL_COMPOSITION=2 env=MPIR_CVAR_CH4_IPC_GPU_P2P_THRESHOLD=1 env=MPIR_CVAR_ALLTOALL_POSIX_INTRA_ALGORITHM=ipc_read env=MPIR_CVAR_ALLTOALL_IPC_READ_MSG_SIZE_THRESHOLD=1024 env=MTEST_GPU_VISIBILITY_AFFINITY=CONTIGUOUS_DEVICE
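
# The CVAR settings above pin the composition and intra-node algorithm choice
# and lower the IPC cutoffs (P2P threshold 1 byte, ipc_read message-size
# threshold 1024 bytes) so the GPU IPC read path is actually exercised at these
# test sizes; this is our understanding of the CVARs, whose defaults would
# otherwise route small messages away from IPC.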