#include <torch/csrc/jit/backends/backend_debug_handler.h>

#include <stack>

namespace torch {
namespace jit {

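// Monotonically increasing counter used to mint process-wide unique debug
// handles; shared by all BackendDebugInfoRecorder instances.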
std::atomic<DebugHandleType> BackendDebugInfoRecorder::unique_debug_handle_{0};
int64_t BackendDebugInfoRecorder::getNextDebugHandle(const Node* node) {
  // Capture the node's inlined call stack if it has one; otherwise store an
  // empty intrusive_ptr so the map entry is still well formed.
  InlinedCallStackPtr cs_ptr;
  if (node->callstack().has_value()) {
    cs_ptr = node->callstack().value();
  } else {
    cs_ptr = c10::intrusive_ptr<InlinedCallStack>();
  }
  DebugHandleType debug_handle = unique_debug_handle_;
  const SourceRange& range = node->sourceRange();
  handles_to_inlined_callstack_ptrs_[debug_handle] =
      std::make_tuple(range, node->kind().toQualString(), cs_ptr);
  // This increment uses seq_cst memory order.
  // Not trying to optimize this for performance for now.
  unique_debug_handle_++;
  return debug_handle;
}
BackendDebugInfoMapType BackendDebugInfoRecorder::stopRecording() {
  // Note that this returns by copy, and since InlinedCallStackPtrs are
  // intrusive pointers this bumps their refcounts. Not performant, but this
  // is not intended to be used on a perf-critical path.
  // An alternative would be to move the map out, but that would be
  // destructive.
  return handles_to_inlined_callstack_ptrs_;
}
} // namespace jit
} // namespace torch
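
// A minimal usage sketch (illustrative comment only, not part of this
// translation unit). Assuming a backend-lowering pass holds a
// std::shared_ptr<Graph> named `graph`, it would typically mint one handle
// per node it emits and keep the returned map for later symbolication:
//
//   BackendDebugInfoRecorder recorder;
//   for (Node* node : graph->nodes()) {
//     int64_t handle = recorder.getNextDebugHandle(node);
//     // ... embed `handle` in the serialized backend payload ...
//   }
//   // Copies the handle -> (SourceRange, op name, InlinedCallStackPtr)
//   // entries accumulated so far.
//   BackendDebugInfoMapType debug_info_map = recorder.stopRecording();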