import torch
from torch.utils._pytree import tree_map
from typing import Iterator, List
import logging
import contextlib
import itertools
from torch.utils._python_dispatch import TorchDispatchMode
# How the chain of calls works for LoggingTensor:
# 1. Call torch.sin
# 2. Attempt __torch_function__. In LoggingTensor torch function is disabled so we bypass it entirely
# 3. Enter dispatcher, wind your way through Autograd
# 4. Hit Python dispatch key, call __torch_dispatch__
# This Tensor can work with autograd in two ways:
# - The wrapped Tensor does not require gradients. In that case, the LoggingTensor
# can require gradients if the user asks for it as a constructor kwarg.
# - The wrapped Tensor can require gradients. In that case autograd will be tracked
# for the wrapped Tensor and the LoggingTensor itself cannot require gradients.
# WARNING: We allow these two possibilities for testing purposes. You should NEVER use both in a single
# test or you might get surprising behavior.
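#
# For illustration (assumed snippets, not part of the original comment):
#
#   LoggingTensor(torch.randn(2), requires_grad=True)   # grad tracked on the wrapper itself
#   LoggingTensor(torch.randn(2, requires_grad=True))   # grad tracked on the wrapped elem
#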
# TODO: TensorBase should work
class LoggingTensor(torch.Tensor):
    elem: torch.Tensor

    __slots__ = ['elem']

    context = contextlib.nullcontext

    __torch_function__ = torch._C._disabled_torch_function_impl

    @staticmethod
    def __new__(cls, elem, *args, **kwargs):
        # The wrapping tensor (LoggingTensor) shouldn't hold any
        # memory for the class in question, but it should still
        # advertise the same device as before
        r = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
            cls, elem.size(),
            strides=elem.stride(), storage_offset=elem.storage_offset(),
            # TODO: clone storage aliasing
            dtype=elem.dtype, layout=elem.layout,
            device=elem.device, requires_grad=kwargs.get("requires_grad", False)
        )
        # ...the real tensor is held as an element on the tensor.
        r.elem = elem.detach() if r.requires_grad else elem
        return r

    def __repr__(self):
        return super().__repr__(tensor_contents=f"{self.elem}")

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        def unwrap(e):
            return e.elem if isinstance(e, cls) else e

        def wrap(e):
            return cls(e) if isinstance(e, torch.Tensor) else e

        # Normalize kwargs so that unwrapping and the log handler (which calls
        # .items() on it) never see None.
        if kwargs is None:
            kwargs = {}
        with cls.context():
            rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)))
        logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs)
        return rs
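
# Rough illustration of the chain documented at the top of this file (a sketch
# with assumed values, not executed here):
#
#   x = LoggingTensor(torch.tensor([1.0]))
#   y = torch.sin(x)
#
# __torch_function__ is disabled, so the call goes straight to the dispatcher;
# when it reaches the Python dispatch key, __torch_dispatch__ runs with
# func=aten.sin.default: `unwrap` maps x -> x.elem, the real sin executes on
# the plain tensor, `wrap` returns a new LoggingTensor around the result, and
# one record is sent to the "LoggingTensor" logger.
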
class LoggingTensorMode(TorchDispatchMode):
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        rs = func(*args, **kwargs)
        logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs)
        return rs
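
# A hedged usage sketch (assumed snippet, not from the original file): unlike
# the wrapper subclass above, the mode logs every op dispatched while it is
# active, even on plain tensors, because it hooks dispatch globally rather
# than per-tensor:
#
#   with LoggingTensorMode():
#       torch.ones(2) + torch.ones(2)   # aten.ones and aten.add.Tensor are logged
#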
class LoggingTensorReentrant(LoggingTensor):
    context = torch.overrides.enable_reentrant_dispatch

# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list
class LoggingTensorHandler(logging.Handler):
    log_list: List[str]
    next_shortid: int

    def __init__(self, log_list: List[str], use_shortid_for_all_tensors: bool) -> None:
        logging.Handler.__init__(self)
        self.log_list = log_list
        self.next_shortid = 0
        self.use_shortid_for_all_tensors = use_shortid_for_all_tensors

    # WARNING: not deterministic over multiple threads, this matters for
    # autograd
    def _shortid(self, o: object) -> int:
        if not hasattr(o, '_shortid'):
            o._shortid = self.next_shortid  # type: ignore[attr-defined]
            self.next_shortid += 1
        return o._shortid  # type: ignore[attr-defined]

    def _fmt(self, a: object) -> str:
        cond_cls = torch.Tensor if self.use_shortid_for_all_tensors else LoggingTensor
        return f'${self._shortid(a)}' if isinstance(a, cond_cls) else repr(a)

    def emit(self, record):
        fmt_args = ", ".join(itertools.chain(
            (self._fmt(a) for a in record.args[0]),
            (f"{k}={self._fmt(v)}" for k, v in record.args[1].items())
        ))
        fmt_rets = ", ".join(self._fmt(a) for a in record.args[2]) \
            if isinstance(record.args[2], (list, tuple)) else self._fmt(record.args[2])
        self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})')
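
# Each emitted record is rendered as a single line of the form
#   "<returns> = <module>.<func>(<args>)"
# with tensors abbreviated to short ids, e.g. (illustrative only):
#   $2 = torch._ops.aten.add.Tensor($0, $1)
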
def log_input(name: str, var: object):
logging.getLogger("LoggingTensor").info("input", (name,), {}, (var,))
@contextlib.contextmanager
def capture_logs(is_mode=False) -> Iterator[List[str]]:
    logger = logging.getLogger("LoggingTensor")
    log_list: List[str] = []
    handler = LoggingTensorHandler(log_list, use_shortid_for_all_tensors=is_mode)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.propagate = False
    try:
        yield log_list
    finally:
        logger.removeHandler(handler)
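
# A minimal usage sketch (assumed variable names, not part of the original
# module): with the handler installed by capture_logs, every op dispatched on
# a LoggingTensor inside the block is appended to `logs`:
#
#   with capture_logs() as logs:
#       x = LoggingTensor(torch.randn(3))
#       log_input("x", x)
#       y = x + x
#   # logs now holds something like:
#   #   ["$0 = input('x')", "$1 = torch._ops.aten.add.Tensor($0, $0)"]
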
@contextlib.contextmanager
def capture_logs_with_logging_tensor_mode():
    with LoggingTensorMode(), capture_logs(True) as logs:
        yield logs
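

if __name__ == "__main__":
    # Small optional demo (not part of the original module): running this file
    # directly prints the ops recorded by LoggingTensorMode for a few ordinary
    # tensor operations. Exact op names in the output may vary across PyTorch
    # versions.
    with capture_logs_with_logging_tensor_mode() as logs:
        a = torch.randn(2, 2)
        (a * a).sum()
    print("\n".join(logs))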