#pragma once
#include <c10/macros/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/profiling_record.h>
#include <torch/csrc/jit/codegen/cuda/fusion.h>
/*
 * This file handles parsing PyTorch JIT IR.
 *
 * It is used in two places:
 * 1. When partitioning PyTorch JIT IR to create prim::CudaFusionGroup, each
 *    node is queried via `isNodeParsible` to determine whether it can be
 *    handled by the fuser (i.e. whether a given PyTorch JIT operator should
 *    be merged into the fusion group);
 * 2. When lowering PyTorch JIT IR to the CUDA codegen IR, `parseJitIR`
 *    creates a `Fusion` by traversing a PyTorch JIT graph.
 *
 * TODO: we could consider exposing an API to allow custom registration of
 * parsing rules for a given PyTorch JIT operator.
*/
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
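
// Default thread-block sizes, presumably consumed by the scheduling
// heuristics: pointwise (Pw) kernels, and reductions over the
// fastest-changing dimension (Fcd) vs. other dimensions.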
constexpr int kPwThreadX = 128;
constexpr int kFcdReductionThreadX = 128;
constexpr int kNonFcdReductionThreadX = 32;
constexpr int kNonFcdReductionThreadY = 32;
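
// Node/block classification queries (reductions, normalizations, element-wise
// ops) used by the fuser's partitioning and scheduling logic.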
TORCH_CUDA_CU_API bool hasReductionNode(const Block* block);
TORCH_CUDA_CU_API bool isReductionToSizeNode(const Node* node);
TORCH_CUDA_CU_API bool isReductionNode(const Node* node);
TORCH_CUDA_CU_API bool hasNormalizationNode(const Block* block);
TORCH_CUDA_CU_API bool isNormalizationNode(const Node* node);
TORCH_CUDA_CU_API bool isElementWiseNode(const Node* node);
// Returns whether a parsing function exists for the given node type.
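//
// A minimal usage sketch of how the partitioner might query this
// (hypothetical loop; `block` is any Block being considered for fusion):
//
//   for (const Node* node : block->nodes()) {
//     if (isNodeParsible(node)) {
//       // node is eligible to be merged into a prim::CudaFusionGroup
//     }
//   }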
TORCH_CUDA_CU_API bool isNodeParsible(const Node* node);
TORCH_CUDA_CU_API bool shouldProfileNode(const Node* node);
TORCH_CUDA_CU_API bool skipNodeKind(const std::string& symbol_str, bool flip);
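// Inserts the profiling nodes the fuser relies on (see `shouldProfileNode`)
// into the graph owned by the ProfilingRecord.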
void InsertProfileNodes(ProfilingRecord* pr);
// Lowers a PyTorch JIT graph to a `Fusion`.
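//
// A minimal usage sketch (assumption: `fusion_group` is a
// prim::CudaFusionGroup node produced by the partitioner, whose subgraph is
// stored under attr::Subgraph):
//
//   std::shared_ptr<Graph> subgraph = fusion_group->g(attr::Subgraph);
//   std::unique_ptr<Fusion> fusion = parseJitIR(subgraph);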
TORCH_CUDA_CU_API std::unique_ptr<Fusion> parseJitIR(
const std::shared_ptr<Graph>& graph);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch