File: main.c

Package: libgpuarray 0.7.6-13
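This file is the test-runner entry point for the package's Check-based unit tests. When built with TEST_COLLECTIVES, it additionally initializes MPI and assigns one GPU, named on the command line, to each rank before running the suite.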
#include <stdlib.h>
#ifdef TEST_COLLECTIVES
#include <stdio.h>
#endif  // TEST_COLLECTIVES

#include <check.h>
#ifdef TEST_COLLECTIVES
#include <mpi.h>

extern int comm_ndev;
extern int comm_rank;
extern char *dev_name;
#endif  // TEST_COLLECTIVES
extern Suite *get_suite(void);

int main(int argc, char *argv[])
{
  int number_failed;
  Suite *s;
  SRunner *sr;

#ifdef TEST_COLLECTIVES
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &comm_ndev);
  MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);

  // argv[1..comm_ndev] must name one GPU per rank, so argc must be
  // at least comm_ndev + 1.
  if (argc < comm_ndev + 1) {
    if (comm_rank == 0)
      printf("Usage: %s <GPU list, one per rank>\n", argv[0]);
    MPI_Finalize();  // Shut MPI down cleanly before exiting.
    exit(1);
  }

  dev_name = argv[comm_rank + 1];  // Select the GPU for this rank.
#endif  // TEST_COLLECTIVES

  s = get_suite();
  sr = srunner_create(s);
#ifdef TEST_COLLECTIVES
  // By default, Check forks each test into a separate (non-MPI-registered)
  // process. The tests here use MPI, so forking must be disabled.
  srunner_set_fork_status(sr, CK_NOFORK);
#endif  // TEST_COLLECTIVES
  srunner_run_all(sr, CK_VERBOSE);
  number_failed = srunner_ntests_failed(sr);
  srunner_free(sr);

#ifdef TEST_COLLECTIVES
  MPI_Finalize();
#endif  // TEST_COLLECTIVES
  return number_failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
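
For reference, a minimal sketch of a suite factory compatible with the runner above. The test case test_smoke and the suite/tcase labels are hypothetical placeholders; libgpuarray's real test cases are defined elsewhere in its test sources.

#include <check.h>

// Hypothetical trivial test case, just to make the sketch self-contained.
START_TEST(test_smoke)
{
  ck_assert_int_eq(1 + 1, 2);
}
END_TEST

// Minimal get_suite() matching the extern declaration in main.c.
Suite *get_suite(void)
{
  Suite *s = suite_create("example");
  TCase *tc = tcase_create("core");

  tcase_add_test(tc, test_smoke);
  suite_add_tcase(s, tc);
  return s;
}

When built with TEST_COLLECTIVES, the binary must be launched under an MPI launcher with one GPU name per rank, e.g. mpirun -n 2 ./main cuda0 cuda1 (device names shown are illustrative).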