File: partition_desc.h

Package: pytorch-cuda 2.6.0+dfsg-7
#pragma once

#include <c10/util/Exception.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/codegen/fuser/tensor_desc.h>

#include <cstdint>
#include <memory>
#include <vector>

namespace torch::jit::fuser {

// Descriptor for chunking an input tensor into subtensors
// OR concatenating an output tensor from subtensors.
// Note: the default-constructed descriptor is used for tensors that do not
// participate in chunk or cat operations.
struct TORCH_API PartitionDesc {
  PartitionDesc() : nSubTensors_{1}, dim_{0} {}

  PartitionDesc(const TensorDesc& _desc, size_t _nSubTensors, size_t _dim)
      : nSubTensors_{_nSubTensors}, dim_{_dim} {
    AT_ASSERT(nSubTensors_ > 1);
    std::vector<bool> cont = _desc.contiguity;
    if (dim_ > 0) {
      // when we narrow the concatenated output/chunked input
      // we make the size[dim] smaller while keeping the stride[dim] the same,
      // meaning: stride[dim - 1] != stride[dim]*size[dim]
      // so dim - 1 is no longer contiguous
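      // (Illustrative example: a contiguous [4, 6] tensor has strides [6, 1];
      // chunking it along dim 1 gives views of shape [4, 3] that keep strides
      // [6, 1], and since 6 != 1 * 3 the contiguity condition
      // stride[0] == stride[1] * size[1] no longer holds for dim 0.)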
      cont[dim_ - 1] = false;
    }
    subTensorDesc_ = std::make_shared<TensorDesc>(_desc.scalar_type, cont);
  }

  bool isNoop() const {
    return (nSubTensors_ == 1);
  }
  size_t nSubTensors() const {
    return nSubTensors_;
  }
  size_t dim() const {
    return dim_;
  }
  std::shared_ptr<TensorDesc> subTensorDesc() {
    return subTensorDesc_;
  }
  const std::shared_ptr<TensorDesc> subTensorDesc() const {
    return subTensorDesc_;
  }

 private:
  size_t nSubTensors_; // == 1 for tensors that should not be operated on via
                       // chunk/cat
  size_t dim_; // dimension along which the chunk/concat occurs
  std::shared_ptr<TensorDesc>
      subTensorDesc_; // descriptor for the subtensor, if it exists
};

} // namespace torch::jit::fuser
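
A minimal usage sketch follows. It is illustrative and not part of the upstream header; the standalone main() and the concrete values are assumptions, but the two-argument TensorDesc constructor (scalar type plus per-dimension contiguity flags) is the same one the PartitionDesc constructor above already calls.

#include <torch/csrc/jit/codegen/fuser/partition_desc.h>
#include <torch/csrc/jit/codegen/fuser/tensor_desc.h>

#include <iostream>
#include <vector>

int main() {
  using namespace torch::jit::fuser;

  // A 2-D float tensor that is contiguous in both dimensions.
  TensorDesc desc(at::ScalarType::Float, std::vector<bool>{true, true});

  // Split it into 4 subtensors along dim 1, as a chunk op would.
  PartitionDesc chunk(desc, /*_nSubTensors=*/4, /*_dim=*/1);

  // Tensors that take no part in chunk/cat keep the default descriptor.
  PartitionDesc untouched;

  std::cout << chunk.isNoop() << ' '        // 0
            << chunk.nSubTensors() << ' '   // 4
            << chunk.dim() << ' '           // 1
            << untouched.isNoop() << '\n';  // 1
}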