#pragma once
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/codegen/cuda/fusion.h>
/*
 * This file handles parsing of PyTorch JIT IR.
 *
 * It is used in two places:
 * 1. When partitioning a PyTorch JIT graph to create prim::CudaFusionGroup
 *    nodes, each node is queried via `isNodeParsible` to determine whether
 *    the node can be handled by the fuser (i.e. whether a given PyTorch JIT
 *    operator should be merged);
 * 2. When lowering PyTorch JIT IR to CUDA codegen IR: a `Fusion` is created
 *    by traversing a PyTorch JIT graph.
 *
 * TODO: we could consider exposing an API to allow custom registration of
 * parsing rules for a given PyTorch JIT operator.
 */
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
// Kernel launch-configuration constants. Given the names and values, these
// presumably describe CUDA thread-block dimensions (blockDim.x / blockDim.y)
// chosen for the generated kernels — TODO(review): confirm against the
// code-generation/scheduling logic that consumes them.
//
// Block width for pointwise ("Pw") kernels.
constexpr int kPwThreadX = 128;
// Block width when reducing over the fastest-changing dimension ("Fcd").
constexpr int kFcdReductionThreadX = 128;
// Block shape when reducing over a non-fastest-changing dimension:
// 32x32 threads (x presumably spans the non-reduced dimension, y the
// reduction — verify in the reduction scheduler).
constexpr int kNonFcdReductionThreadX = 32;
constexpr int kNonFcdReductionThreadY = 32;
// Returns true if any node in `block` (per the definition in the .cpp) is a
// reduction node; used to pick a reduction-aware schedule for a fusion group.
TORCH_CUDA_API bool hasReductionNode(const Block* block);
// Returns true if `node` is classified as a reduction operation.
TORCH_CUDA_API bool isReductionNode(const Node* node);
// returns whether or not a parsing function exists for the given node type.
// (Note: "Parsible" is the established spelling of this API in the codebase.)
TORCH_CUDA_API bool isNodeParsible(const Node* node);
// lowers PyTorch jit graph to `Fusion`.
// Takes the graph by non-const reference; ownership of the returned Fusion
// transfers to the caller via unique_ptr.
TORCH_CUDA_API std::unique_ptr<Fusion> parseJitIR(
    std::shared_ptr<Graph>& graph);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch