File: python_arg_parsing.h

package info (click to toggle)
pytorch-cuda 2.6.0%2Bdfsg-7
  • links: PTS, VCS
  • area: contrib
  • in suites: forky, sid, trixie
  • size: 161,620 kB
  • sloc: python: 1,278,832; cpp: 900,322; ansic: 82,710; asm: 7,754; java: 3,363; sh: 2,811; javascript: 2,443; makefile: 597; ruby: 195; xml: 84; objc: 68
file content (49 lines) | stat: -rw-r--r-- 1,423 bytes parent folder | download | duplicates (3)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
#pragma once

#include <ATen/core/Tensor.h>
#include <torch/csrc/python_headers.h>

#include <torch/csrc/utils/python_arg_parser.h>

namespace torch::autograd::utils {

// Normalizes the arguments of the three parsed `.to(...)` overloads into a
// single (device, dtype, non_blocking, copy, memory_format) tuple.
//
// `allow_copy` exists to accept a `copy` argument for Tensor.to (and by
// proxy PackedSequences.to) but reject it for nn.Module.to; when it is
// false and the copy slot was supplied, a std::runtime_error is thrown.
//
// `r.idx` selects which overload matched:
//   0: slots are (device?, dtype?, non_blocking, copy, memory_format?)
//   1: slots are (dtype, non_blocking, copy, memory_format?) — no device
//   otherwise: slot 0 is a reference tensor whose device/dtype are reused,
//              followed by (non_blocking, copy, memory_format?)
inline std::tuple<
    std::optional<at::Device>,
    std::optional<at::ScalarType>,
    bool,
    bool,
    std::optional<at::MemoryFormat>>
parse_to_conversion(PythonArgs& r, bool allow_copy) {
  switch (r.idx) {
    case 0: {
      // copy lives in slot 3 for this overload
      if (!allow_copy && !r.isNone(3))
        throw std::runtime_error(".to() does not accept copy argument");
      return {
          r.deviceOptional(0),
          r.scalartypeOptional(1),
          r.toBool(2),
          r.toBool(3),
          r.memoryformatOptional(4)};
    }
    case 1: {
      // no device slot; copy lives in slot 2
      if (!allow_copy && !r.isNone(2))
        throw std::runtime_error(".to() does not accept copy argument");
      return {
          std::nullopt,
          r.scalartype(0),
          r.toBool(1),
          r.toBool(2),
          r.memoryformatOptional(3)};
    }
    default: {
      // tensor overload: device and dtype come from the reference tensor.
      // NOTE: the tensor is extracted before the copy-guard, matching the
      // original evaluation order.
      auto other = r.tensor(0);
      if (!allow_copy && !r.isNone(2))
        throw std::runtime_error(".to() does not accept copy argument");
      return {
          other.device(),
          other.scalar_type(),
          r.toBool(1),
          r.toBool(2),
          r.memoryformatOptional(3)};
    }
  }
}
} // namespace torch::autograd::utils