File: _distributed_autograd.pyi

Package: pytorch 1.13.1+dfsg-4 (Debian bookworm)
import torch
from typing import Dict, List, Set, Any

# This module is defined in torch/csrc/distributed/autograd/init.cpp

class DistAutogradContext:
    # Handle to one distributed autograd context (one per distributed
    # backward pass), exposing its id, the send/recv autograd functions
    # recorded during the forward pass, and the worker ids it has seen.
    def _context_id(self) -> int: ...
    def _recv_functions(self) -> Dict[int, Any]: ...
    def _send_functions(self) -> Dict[int, Any]: ...
    def _known_worker_ids(self) -> Set[int]: ...
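
# A hedged sketch of the accessors above (assumes an initialized RPC worker,
# and that this module's names are re-exported from torch.distributed.autograd
# as torch/distributed/autograd/__init__.py does):
#
#   import torch.distributed.autograd as dist_autograd
#   with dist_autograd.context() as context_id:
#       ctx = dist_autograd._retrieve_context(context_id)
#       assert ctx._context_id() == context_id
#       workers = ctx._known_worker_ids()  # worker ids seen by this context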

# Context lifecycle, lookup, and debugging helpers (module-private; the
# public entry points are backward() and get_gradients() below).
def _new_context() -> DistAutogradContext: ...
def _release_context(context_id: int) -> None: ...
def _get_max_id() -> int: ...
def _is_valid_context(worker_id: int) -> bool: ...
def _retrieve_context(context_id: int) -> DistAutogradContext: ...
def _current_context() -> DistAutogradContext: ...
def _init(worker_id: int) -> None: ...
def _get_debug_info() -> Dict[str, str]: ...
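
# Sketch of the lifecycle the helpers above manage (this pairing is what the
# public dist_autograd.context() manager performs on enter/exit):
#
#   ctx = _new_context()
#   try:
#       pass  # forward pass records send/recv functions into ctx
#   finally:
#       _release_context(ctx._context_id())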

# Public entry points, exposed as torch.distributed.autograd.backward
# and torch.distributed.autograd.get_gradients.
def backward(
    context_id: int,
    roots: List[torch.Tensor],
    retain_graph: bool = False,
) -> None: ...
def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ...
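
# ---------------------------------------------------------------------------
# Usage sketch (not part of the stub): a minimal, hedged example of driving
# this API through its public wrapper, torch.distributed.autograd, following
# the pattern in the PyTorch RPC docs. The worker names, world size, and the
# remote op (torch.add) are illustrative assumptions.
#
#   import torch
#   import torch.distributed.autograd as dist_autograd
#   import torch.distributed.rpc as rpc
#
#   rpc.init_rpc("worker0", rank=0, world_size=2)  # also initializes _init()
#   t1 = torch.rand((3, 3), requires_grad=True)
#   t2 = torch.rand((3, 3), requires_grad=True)
#   with dist_autograd.context() as context_id:
#       # The forward pass over RPC records send/recv autograd functions
#       # into the context created above.
#       loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
#       # Distributed backward pass, keyed by context_id.
#       dist_autograd.backward(context_id, [loss])
#       # Gradients accumulate per context, not in Tensor.grad.
#       grads = dist_autograd.get_gradients(context_id)
#   rpc.shutdown()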