File: mpi_ops.cc

Package: pytorch 1.7.1-7
#include "caffe2/mpi/mpi_ops.h"

namespace caffe2 {

// Schemas. Except for MPICreateCommonWorld, each op takes the MPI common
// world blob (created by MPICreateCommonWorld) as input 0 and the tensor it
// operates on as input 1.
OPERATOR_SCHEMA(MPICreateCommonWorld)
  .NumInputs(0)
  .NumOutputs(1);
// Broadcast runs strictly in place: input 1 (the tensor) must be output 0.
OPERATOR_SCHEMA(MPIBroadcast)
  .NumInputs(2)
  .NumOutputs(1)
  .EnforceInplace({{1, 0}});
OPERATOR_SCHEMA(MPIReduce)
  .NumInputs(2)
  .NumOutputs(1);
OPERATOR_SCHEMA(MPIAllgather)
  .NumInputs(2)
  .NumOutputs(1);
// Allreduce may, but does not have to, reuse the input tensor as its output.
OPERATOR_SCHEMA(MPIAllreduce)
  .NumInputs(2)
  .NumOutputs(1)
  .AllowInplace({{1, 0}});
OPERATOR_SCHEMA(MPISendTensor);
OPERATOR_SCHEMA(MPIReceiveTensor);

// CPU registrations. MPIReduce, MPIAllgather, and MPIAllreduce are
// instantiated for float tensors only.
REGISTER_CPU_OPERATOR(MPICreateCommonWorld, MPICreateCommonWorldOp<CPUContext>);
REGISTER_CPU_OPERATOR(MPIBroadcast, MPIBroadcastOp<CPUContext>);
REGISTER_CPU_OPERATOR(MPIReduce, MPIReduceOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(MPIAllgather, MPIAllgatherOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(MPIAllreduce, MPIAllreduceOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(MPISendTensor, MPISendTensorOp<CPUContext>);
REGISTER_CPU_OPERATOR(MPIReceiveTensor, MPIReceiveTensorOp<CPUContext>);

}  // namespace caffe2
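
Usage note (not part of mpi_ops.cc): below is a minimal, hedged sketch of how the operators registered above could be driven through the generic Caffe2 C++ API in this source tree. It assumes PyTorch/Caffe2 was built with MPI support (USE_MPI=ON), that the process is launched under mpirun so MPI_Init succeeds, and that MPICreateCommonWorld needs no extra arguments; the blob names "comm_world" and "X" are illustrative only.

// Hedged sketch: run MPICreateCommonWorld and MPIAllreduce via the generic
// Caffe2 operator interface. Assumptions as stated in the note above.
#include <cstdio>
#include <mpi.h>

#include "caffe2/core/blob.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/workspace.h"

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  {
    caffe2::Workspace ws;

    // Create the common world blob that the other MPI ops take as input 0.
    caffe2::OperatorDef cw;
    cw.set_type("MPICreateCommonWorld");
    cw.add_output("comm_world");
    CAFFE_ENFORCE(caffe2::CreateOperator(cw, &ws)->Run());

    // Put a small float tensor into the workspace on every rank.
    auto* tensor =
        caffe2::BlobGetMutableTensor(ws.CreateBlob("X"), caffe2::CPU);
    tensor->Resize(8);
    float* data = tensor->mutable_data<float>();
    for (int i = 0; i < 8; ++i) {
      data[i] = 1.0f;
    }

    // All-reduce the tensor in place (AllowInplace({{1, 0}}) permits X -> X).
    caffe2::OperatorDef ar;
    ar.set_type("MPIAllreduce");
    ar.add_input("comm_world");
    ar.add_input("X");
    ar.add_output("X");
    CAFFE_ENFORCE(caffe2::CreateOperator(ar, &ws)->Run());

    // After the sum-allreduce, each element should equal the number of ranks.
    const auto* result =
        caffe2::BlobGetMutableTensor(ws.GetBlob("X"), caffe2::CPU);
    printf("result[0] = %f\n", result->data<float>()[0]);
  }
  MPI_Finalize();
  return 0;
}

Compiled against this tree and launched with, e.g., mpirun -np 4, each rank would be expected to print 4.0. This is a sketch under the stated assumptions, not code shipped in the package.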