#include <torch/csrc/jit/codegen/onednn/decompose_silu.h>
#include <torch/csrc/jit/codegen/onednn/operator.h>
#include <ATen/code_template.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>
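
// Decomposes aten::silu into aten::sigmoid followed by aten::mul
// (silu(x) = x * sigmoid(x)) ahead of oneDNN graph (LLGA) fusion.
// The decomposition is applied only when the silu input is produced by a
// non-transposed aten::_convolution or an aten::linear node.
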
namespace torch::jit::fuser::onednn {

static bool shouldDecomposeSilu(Node* node) {
  if (node->kind() != aten::silu) {
    return false;
  }
  auto inputToSilu = node->input(0)->node();
  if (inputToSilu->kind() == aten::_convolution) {
    // TODO: remove the transposed check once the bridge supports ConvTranspose
    bool transposed = Operator::Bool(inputToSilu, 6);
    return !transposed;
  }
  if (inputToSilu->kind() == aten::linear) {
    return true;
  }
  return false;
}
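
// Rewrite a single aten::silu node: insert sigmoid(x) and sigmoid(x) * x
// right before it and redirect all uses of its output to the product.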
static void DecomposeSilu(Node* node) {
  if (shouldDecomposeSilu(node)) {
    auto dtype = node->input(0)->type()->expect<TensorType>();

    WithInsertPoint guard(node);
    auto g = node->owningGraph();
    auto sigmoid = g->insert(aten::sigmoid, {node->input(0)});
    sigmoid->setType(dtype);

    auto mul = g->insert(aten::mul, {sigmoid, node->input(0)});
    mul->setType(dtype);

    node->output()->replaceAllUsesWith(mul);
  }
}
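
// Recursively walk a block: descend into any sub-blocks a node owns
// (e.g. the bodies of prim::If / prim::Loop) before decomposing the node
// itself when it is an aten::silu.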
static void DecomposeSilu(Block* block) {
  for (auto node : block->nodes()) {
    for (auto sub : node->blocks()) {
      DecomposeSilu(sub);
    }

    if (node->kind() == aten::silu) {
      DecomposeSilu(node);
    }
  }
}
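
// Entry point: decompose every eligible aten::silu in the graph, then run
// dead-code elimination to drop the now-unused silu nodes.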
void DecomposeSiluForLLGA(std::shared_ptr<Graph>& graph) {
  DecomposeSilu(graph->block());
  EliminateDeadCode(graph);
}

} // namespace torch::jit::fuser::onednn