1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
|
#pragma once
#include <unordered_map>
#include <vector>
#include <torch/csrc/jit/tensorexpr/analysis.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/reduction.h>
namespace torch {
namespace jit {
namespace tensorexpr {
using VarMapping = std::vector<std::pair<const Var*, const Expr*>>;
// IRMutator that substitutes variables according to a caller-supplied
// mapping: every occurrence of a key Var is replaced by its mapped Expr.
// Expressions that are not mapped Vars are left untouched.
class VarSubMutator : public IRMutator {
 public:
  // Builds the lookup table from the (Var, Expr) pairs. A null key is a
  // malformed mapping and is rejected up front.
  VarSubMutator(const VarMapping& var_mapping) {
    for (const auto& [key_var, replacement] : var_mapping) {
      if (!key_var) {
        throw malformed_input("missing key in VarSubMutator");
      }
      var_mapping_[key_var] = replacement;
    }
  }

  // A Var with an entry in the table becomes its replacement expression;
  // any other Var passes through unchanged.
  const Expr* mutate(const Var* var) override {
    auto found = var_mapping_.find(var);
    if (found != var_mapping_.end()) {
      return found->second;
    }
    return var;
  }

  // Rebuilds a ReduceOp with the substitution applied to its body and to
  // both its output (outer) and reduction (inner) argument lists.
  const Expr* mutate(const ReduceOp* var) override {
    const Expr* new_body = var->body().node()->accept_mutator(this);

    std::vector<const Expr*> outer_args;
    for (const auto* out_arg : var->output_args()) {
      outer_args.push_back(out_arg->accept_mutator(this));
    }

    std::vector<const Var*> inner_vars;
    for (const auto* reduce_arg : var->reduce_args()) {
      const Expr* mutated = reduce_arg->accept_mutator(this);
      if (const auto* as_var = dynamic_cast<const Var*>(mutated)) {
        inner_vars.push_back(as_var);
      } else {
        // The reduction var was substituted with a compound expression;
        // the inner list must still hold Vars, so collect every Var that
        // appears inside the substituted expression instead.
        VarFinder finder;
        mutated->accept(&finder);
        auto found_vars = finder.vars();
        inner_vars.insert(inner_vars.end(), found_vars.begin(), found_vars.end());
      }
    }

    return new ReduceOp(
        const_cast<Buf*>(var->accumulator()),
        ExprHandle(new_body),
        var->interaction(),
        outer_args,
        inner_vars);
  }

 private:
  std::unordered_map<const Var*, const Expr*> var_mapping_;
};
} // namespace tensorexpr
} // namespace jit
} // namespace torch
|