/*
  File: petscsftypes.h
  Package: petsc 3.24.4+dfsg1-1 (Debian; suites: forky, sid)
*/
#pragma once

/* MANSEC = Vec */
/* SUBMANSEC = PetscSF */

/*S
   PetscSF - PETSc object for managing the communication of certain entries of arrays and `Vec` between MPI processes.

   Level: intermediate

  `PetscSF` uses the concept of star forests to indicate and determine the communication patterns concisely and efficiently.
  A star forest <https://en.wikipedia.org/wiki/Star_(graph_theory)> is simply a collection of trees of height 1. The leaf nodes represent
  "ghost locations" for the root nodes.

  The standard usage paradigm for `PetscSF` is to provide the communication pattern with `PetscSFSetGraph()` or `PetscSFSetGraphWithPattern()` and
  then perform the communication using `PetscSFBcastBegin()` and `PetscSFBcastEnd()`, `PetscSFReduceBegin()` and `PetscSFReduceEnd()`.

.seealso: [](sec_petscsf), `PetscSFCreate()`, `PetscSFSetGraph()`, `PetscSFSetGraphWithPattern()`, `PetscSFBcastBegin()`, `PetscSFBcastEnd()`,
          `PetscSFReduceBegin()`, `PetscSFReduceEnd()`, `VecScatter`, `VecScatterCreate()`
S*/
typedef struct _p_PetscSF *PetscSF;

/*J
  PetscSFType - String with the name of a `PetscSF` type. Each `PetscSFType` uses different mechanisms to perform the communication.

  Level: beginner

  Available Types:
+ `PETSCSFBASIC`      - use MPI sends and receives
. `PETSCSFNEIGHBOR`   - use MPI_Neighbor operations
. `PETSCSFALLGATHERV` - use MPI_Allgatherv operations
. `PETSCSFALLGATHER`  - use MPI_Allgather operations
. `PETSCSFGATHERV`    - use MPI_Igatherv and MPI_Iscatterv operations
. `PETSCSFGATHER`     - use MPI_Igather and MPI_Iscatter operations
. `PETSCSFALLTOALL`   - use MPI_Ialltoall operations
- `PETSCSFWINDOW`     - use MPI_Win operations

  Note:
  Some `PetscSFType` only provide specialized code for a subset of the `PetscSF` operations and use `PETSCSFBASIC` for the others.

.seealso: [](sec_petscsf), `PetscSFSetType()`, `PetscSF`
J*/
typedef const char *PetscSFType;
/* Registered implementation names; pass one of these strings to PetscSFSetType() */
#define PETSCSFBASIC      "basic"
#define PETSCSFNEIGHBOR   "neighbor"
#define PETSCSFALLGATHERV "allgatherv"
#define PETSCSFALLGATHER  "allgather"
#define PETSCSFGATHERV    "gatherv"
#define PETSCSFGATHER     "gather"
#define PETSCSFALLTOALL   "alltoall"
#define PETSCSFWINDOW     "window"

/*S
   PetscSFNode - specifier of MPI rank owner and local index for array or `Vec` entry locations that are to be communicated with a `PetscSF`

   Level: beginner

  Sample Usage:
.vb
    PetscSFNode    *remote;
    PetscCall(PetscMalloc1(nleaves,&remote));
    for (i=0; i<nleaves; i++) {
      remote[i].rank = i;
      remote[i].index = rank;
    }
.ve

  Sample Fortran Usage:
.vb
    type(PetscSFNode) remote(6)
    remote(1)%rank  = modulo(rank+size-1,size)
    remote(1)%index = 1 * stride
.ve

  Notes:
  Use  `MPIU_SF_NODE` when performing MPI operations on arrays of `PetscSFNode`

  Generally the values of `rank` should be in $[ 0,size)$  and the value of `index` greater than or equal to 0, but there are some situations that violate this.

.seealso: [](sec_petscsf), `PetscSF`, `PetscSFSetGraph()`
S*/
typedef struct {
  PetscInt rank;  /* MPI rank of owner */
  PetscInt index; /* Index of node on rank */
} PetscSFNode;

/* MPI datatype for PetscSFNode; MPIU_2INT presumably matches the layout of the two PetscInt members — verify if PetscInt width changes */
#define MPIU_SF_NODE MPIU_2INT

/* Direction of data movement through the star forest */
typedef enum {
  PETSCSF_ROOT2LEAF = 0, /* move data from roots to leaves (e.g. broadcast) */
  PETSCSF_LEAF2ROOT = 1  /* move data from leaves to roots (e.g. reduce) */
} PetscSFDirection;
/* Which communication operation an SF call performs */
typedef enum {
  PETSCSF_BCAST  = 0, /* broadcast root values to leaves */
  PETSCSF_REDUCE = 1, /* combine leaf values into roots */
  PETSCSF_FETCH  = 2  /* fetch root value, then atomically update the root */
} PetscSFOperation;
/* When doing device-aware MPI, a backend refers to the SF/device interface */
typedef enum {
  PETSCSF_BACKEND_INVALID = 0,
  PETSCSF_BACKEND_CUDA    = 1,
  PETSCSF_BACKEND_HIP     = 2,
  PETSCSF_BACKEND_KOKKOS  = 3
} PetscSFBackend;
/* Opaque handle to an in-progress SF communication link (internal) */
typedef struct _n_PetscSFLink *PetscSFLink;

/*S
  VecScatter - Object used to manage communication of data
  between vectors in parallel or between parallel and sequential vectors. Manages both scatters and gathers

  Level: beginner

  Note:
  This is an alias for `PetscSF`.

.seealso: [](sec_petscsf), `Vec`, `PetscSF`, `VecScatterCreate()`, `VecScatterBegin()`, `VecScatterEnd()`
S*/
typedef PetscSF VecScatter;

/*J
  VecScatterType - String with the name of a PETSc vector scatter type

  Level: beginner

  Note:
  This is an alias for `PetscSFType`

.seealso: [](sec_petscsf), `PetscSFType`, `VecScatterSetType()`, `VecScatter`, `VecScatterCreate()`, `VecScatterDestroy()`
J*/
typedef PetscSFType VecScatterType;