File: MPI_Wrappers.h

Package: lammps 20220106.git7586adbb6a+ds1-2 (Debian, suite: bookworm)
#ifndef MPI_WRAPPERS_H
#define MPI_WRAPPERS_H

#include <iostream>
#include <string>
#include "mpi.h"

namespace MPI_Wrappers {

  // value/rank pair; the layout matches MPI's built-in MPI_DOUBLE_INT type
  // used with MPI_MINLOC / MPI_MAXLOC reductions
  typedef struct { double val; int rank; } DOUBLE_RANK;

  // communicator queries: calling rank, communicator size, and convenience
  // predicates for rank-zero and single-process runs
  int rank(MPI_Comm comm);
  bool rank_zero(MPI_Comm comm);
  int size(MPI_Comm comm);
  bool serial(MPI_Comm comm);

  // broadcasts and all-reduce style collectives; the int_* variants operate
  // on integer buffers, the unprefixed versions on doubles
  void broadcast(MPI_Comm comm, double *buf, int count = 1);
  void int_broadcast(MPI_Comm comm, int *buf, int count = 1);
  void allsum(MPI_Comm comm, void *send_buf, double *rec_buf, int count = 1);
  void int_allsum(MPI_Comm comm, void *send_buf, int *rec_buf, int count = 1);
  void int_scansum(MPI_Comm comm, int *send_buf, int *rec_buf, int count = 1);
  void allmax(MPI_Comm comm, double *send_buf, double *rec_buf, int count = 1);
  void int_allmax(MPI_Comm comm, int *send_buf, int *rec_buf, int count = 1);
  void allmin(MPI_Comm comm, double *send_buf, double *rec_buf, int count = 1);
  void int_allmin(MPI_Comm comm, int *send_buf, int *rec_buf, int count = 1);
  int rank_min(MPI_Comm comm, double *send_buf, double *rec_buf, int count);

  // blocking point-to-point helpers; the recv variants take the source rank iproc
  void int_recv(MPI_Comm comm, int *recv_buf, int max_size, int iproc);
  void recv(MPI_Comm comm, double *recv_buf, int max_size, int iproc);
  void int_send(MPI_Comm comm, int *send_buf, int send_size);
  void send(MPI_Comm comm, double *send_buf, int send_size);

  // gather operations: variable-count all-gather plus single-value gathers
  void allgatherv(MPI_Comm comm, double *send_buf, int send_count,
                  double *rec_buf, int *rec_counts, int *displacements);
  void gather(MPI_Comm comm, double send, double *recv);
  void int_allgather(MPI_Comm comm, int send, int *recv);

  // miscellaneous collectives: logical OR reduction, barrier synchronization,
  // halting execution with an optional message, and integer scatter
  void logical_or(MPI_Comm comm, void *send_buf, int *rec_buf, int count = 1);
  void barrier(MPI_Comm comm);
  void stop(MPI_Comm comm, std::string msg = "");
  void int_scatter(MPI_Comm comm, int *send_buf, int *rec_buf, int count = 1);

//  void sparse_allsum(MPI_Comm comm, SparseMatrix<double> &toShare);

  // message output helpers; print_msg_once emits the message a single time
  // (optionally with a prefix and trailing newline) rather than from every rank
  void print_msg(MPI_Comm comm, std::string msg);
  void print_msg_once(MPI_Comm comm, std::string msg, bool prefix = true, bool endline = true);

}

#endif
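
For reference, a minimal usage sketch of the wrappers declared above. The
main() driver below is illustrative only (it is not part of LAMMPS) and
assumes the wrappers behave as thin layers over the corresponding MPI
collectives, as their signatures suggest:

#include "mpi.h"
#include "MPI_Wrappers.h"

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  MPI_Comm comm = MPI_COMM_WORLD;
  using namespace MPI_Wrappers;

  // each rank contributes a partial value; every rank receives the total
  double partial = 1.0 * rank(comm);
  double total = 0.0;
  allsum(comm, &partial, &total);            // count defaults to 1

  // rank 0 chooses a parameter, then shares it with all ranks
  double param = rank_zero(comm) ? 3.14 : 0.0;
  broadcast(comm, &param);

  print_msg_once(comm, "allsum and broadcast complete");

  MPI_Finalize();
  return 0;
}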