File: comm.h

Package: pytorch-cuda 2.6.0+dfsg-7
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>

#include <ATen/ATen.h>
#include <c10/cuda/CUDAStream.h>
#include <optional>

#include <cstddef>
#include <vector>

namespace torch::autograd {

// Autograd node that scatters a single input variable across `devices_`,
// splitting it into chunks along `dim_`, optionally with explicit
// per-device `chunk_sizes_` and CUDA `streams_` for the copies.
struct TORCH_CUDA_CU_API Scatter : public Node {
  explicit Scatter(
      std::vector<at::Device> devices,
      std::optional<std::vector<int64_t>> chunk_sizes = std::nullopt,
      int64_t dim = 0,
      std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams =
          std::nullopt,
      bool unsqueeze_scalars = false);
  ~Scatter() override;

  variable_list apply(variable_list&& inputs) override;

  std::vector<at::Device> devices_;
  std::optional<std::vector<int64_t>> chunk_sizes_;
  int64_t dim_;
  std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams_;
  bool unsqueeze_scalars_;
};

// Autograd node that gathers variables from multiple devices onto
// `destination_device_`, concatenating them along `dim_`.
struct TORCH_CUDA_CU_API Gather : public Node {
  explicit Gather(const at::Device& destination_device, int64_t dim = 0);
  ~Gather() override;

  variable_list apply(variable_list&& inputs) override;

  at::Device destination_device_;
  int64_t dim_;
};

} // namespace torch::autograd
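
For context, a minimal sketch of how these nodes can be driven directly from C++ (this mirrors how libtorch's data-parallel utilities use them). It assumes the header lives at `torch/csrc/autograd/functions/comm.h` as in the upstream PyTorch tree, that `apply()` performs the forward scatter/gather of its inputs, and that the build has CUDA with at least two visible GPUs; shapes and device indices are illustrative only.

```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/functions/comm.h>  // assumed upstream location of this header

#include <iostream>

int main() {
  // Illustrative input on GPU 0.
  auto full = torch::randn({8, 4}, torch::TensorOptions().device(torch::kCUDA, 0));

  // Split `full` into equal chunks along dim 0, one chunk per listed device.
  torch::autograd::Scatter scatter(
      {at::Device(at::kCUDA, 0), at::Device(at::kCUDA, 1)},
      /*chunk_sizes=*/std::nullopt,
      /*dim=*/0);
  auto chunks = scatter.apply({full});

  // Concatenate the per-device chunks back onto GPU 0 along dim 0.
  torch::autograd::Gather gather(at::Device(at::kCUDA, 0), /*dim=*/0);
  auto rejoined = gather.apply(std::move(chunks));

  std::cout << rejoined.at(0).sizes() << '\n';  // expected: [8, 4]
  return 0;
}
```

The optional `streams` argument to `Scatter` lets the copies to each device run on caller-supplied CUDA streams instead of the default stream, and `unsqueeze_scalars` controls whether zero-dimensional inputs are promoted to one-dimensional chunks before scattering.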