File: fuse_linear.h

/** \brief Fuse linear patterns into a single at::linear call for easier
 * pattern matching in later passes.
 */
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

/** \brief Match the at::linear pattern and fuse it into a single at::linear.
 * This pass fuses the addmm or matmul + add sequences generated by the JIT
 * back into linear. It can be deleted once the JIT can emit aten::linear
 * directly.
 */
TORCH_API void FuseLinear(std::shared_ptr<Graph>& graph);
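
// Illustrative rewrite (a sketch of the fusion described above; the exact
// pattern strings live in the corresponding .cpp implementation, and the
// value names here are placeholders):
//
//   before, as emitted by the JIT:
//     %res = aten::addmm(%bias, %input, %weight_t, %beta, %alpha)
//   after fusion, re-transposing the weight back to its linear layout:
//     %weight = aten::t(%weight_t)
//     %res = aten::linear(%input, %weight, %bias)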

/** Swap functional linear CallFunctions to aten::linear
 */
TORCH_API void SwapFunctionalLinear(std::shared_ptr<Graph>& graph);
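
// Sketch of the swap (assuming the functional form appears in the IR as a
// prim::CallFunction wrapping torch.nn.functional.linear; names are
// illustrative):
//
//   before:
//     %r = prim::CallFunction(%linear_fn, %input, %weight, %bias)
//   after:
//     %r = aten::linear(%input, %weight, %bias)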
/** Swap all functional linear CallFunctions in a module.
 */
TORCH_API void SwapFunctionalLinear(Module& module);
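
// Usage sketch (hypothetical driver code, not part of this header; assumes a
// TorchScript module saved elsewhere as "model.pt"):
//
//   #include <torch/csrc/jit/serialization/import.h>
//
//   Module m = torch::jit::load("model.pt");
//   SwapFunctionalLinear(m);                       // module-wide swap first
//   auto graph = m.get_method("forward").graph();  // lvalue, as the pass
//   FuseLinear(graph);                             // takes a non-const ref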
} // namespace torch::jit