File: _distributed_autograd.pyi

# mypy: allow-untyped-defs
from typing import Any

import torch

# This module is defined in torch/csrc/distributed/autograd/init.cpp

class DistAutogradContext:
    # Tracks the send/recv autograd functions and the gradients
    # accumulated for a single distributed backward pass.
    def _context_id(self) -> int: ...
    def _recv_functions(self) -> dict[int, Any]: ...
    def _send_functions(self) -> dict[int, Any]: ...
    def _known_worker_ids(self) -> set[int]: ...

# Creates a fresh autograd context on the current worker.
def _new_context() -> DistAutogradContext: ...
# Releases the autograd context with the given id.
def _release_context(context_id: int) -> None: ...
# Returns the highest context id created so far on this worker.
def _get_max_id() -> int: ...
# Returns True if the given id corresponds to a valid autograd context.
def _is_valid_context(worker_id: int) -> bool: ...
# Looks up an existing context by its id.
def _retrieve_context(context_id: int) -> DistAutogradContext: ...
# Returns the context associated with the current thread.
def _current_context() -> DistAutogradContext: ...
# One-time initialization with this worker's id (normally done by init_rpc).
def _init(worker_id: int) -> None: ...
# Returns debugging stats as a string-to-string map.
def _get_debug_info() -> dict[str, str]: ...
# Runs the distributed backward pass from roots for the given context;
# gradients are accumulated in the context rather than in Tensor.grad.
def backward(
    context_id: int,
    roots: list[torch.Tensor],
    retain_graph: bool = False,
) -> None: ...
# Maps each Tensor to the gradient accumulated for it in the given context.
def get_gradients(context_id: int) -> dict[torch.Tensor, torch.Tensor]: ...
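
These are private bindings; they surface through the public torch.distributed.autograd package, which re-exports them alongside the documented context manager. Below is a minimal single-process sketch of the documented usage, assuming a local RPC setup: the worker name "worker0", the master address/port, and the world size of 1 are illustrative assumptions, not part of the stub.

import os

import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc

# Illustrative single-process RPC setup; real jobs span several workers.
os.environ.setdefault("MASTER_ADDR", "localhost")
os.environ.setdefault("MASTER_PORT", "29500")
rpc.init_rpc("worker0", rank=0, world_size=1)

t = torch.ones(2, 2, requires_grad=True)
with dist_autograd.context() as context_id:
    # In a real job the forward pass would include rpc.rpc_sync() calls,
    # which record the send/recv functions tracked by the context.
    loss = (t * 2).sum()
    # Distributed backward pass for this context (the backward() above).
    dist_autograd.backward(context_id, [loss])
    # Gradients accumulate in the context, not in t.grad.
    grads = dist_autograd.get_gradients(context_id)
    print(grads[t])  # tensor([[2., 2.], [2., 2.]])

# The private helpers in the stub are importable from the same package:
ctx = dist_autograd._new_context()
cid = ctx._context_id()
assert dist_autograd._retrieve_context(cid)._context_id() == cid
dist_autograd._release_context(cid)

rpc.shutdown()

Because gradients live in the per-pass context instead of Tensor.grad, several backward passes can run concurrently over the same parameters without clobbering each other; releasing the context is what frees that per-pass state.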