File: __init__.py

package info (click to toggle)
dask.distributed 2024.12.1+ds-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 12,588 kB
  • sloc: python: 96,954; javascript: 1,549; sh: 390; makefile: 220
file content (163 lines) | stat: -rw-r--r-- 3,768 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
from __future__ import annotations

# isort: off
from distributed import config  # load distributed configuration first
from distributed import widgets  # load distributed widgets second

# isort: on

import atexit
import weakref

# This finalizer registers an atexit handler that has to happen before
# distributed registers its handlers, otherwise we observe hangs on
# cluster shutdown when using the UCX comms backend. See
# https://github.com/dask/distributed/issues/7726 for more discussion
# of the problem and the search for long term solutions

weakref.finalize(lambda: None, lambda: None)
import dask
from dask.config import config  # type: ignore

from distributed._version import get_versions
from distributed.actor import Actor, ActorFuture, BaseActorFuture
from distributed.client import (
    Client,
    CompatibleExecutor,
    Future,
    as_completed,
    default_client,
    fire_and_forget,
    futures_of,
    get_task_metadata,
    get_task_stream,
    performance_report,
    wait,
)
from distributed.core import Status, connect, rpc
from distributed.deploy import (
    Adaptive,
    LocalCluster,
    SpecCluster,
    SSHCluster,
    SubprocessCluster,
)
from distributed.diagnostics.plugin import (
    CondaInstall,
    Environ,
    InstallPlugin,
    NannyPlugin,
    PipInstall,
    SchedulerPlugin,
    UploadDirectory,
    UploadFile,
    WorkerPlugin,
)
from distributed.diagnostics.progressbar import progress
from distributed.event import Event
from distributed.lock import Lock
from distributed.multi_lock import MultiLock
from distributed.nanny import Nanny
from distributed.pubsub import Pub, Sub
from distributed.queues import Queue
from distributed.scheduler import KilledWorker, Scheduler
from distributed.security import Security
from distributed.semaphore import Semaphore
from distributed.spans import span
from distributed.threadpoolexecutor import rejoin
from distributed.utils import CancelledError, TimeoutError, sync
from distributed.variable import Variable
from distributed.worker import (
    Reschedule,
    Worker,
    get_client,
    get_worker,
    print,
    secede,
    warn,
)
from distributed.worker_client import local_client, worker_client


def __getattr__(name):
    global __version__, __git_revision__

    if name == "__version__":
        from importlib.metadata import version

        __version__ = version("distributed")
        return __version__

    if name == "__git_revision__":
        from distributed._version import get_versions

        __git_revision__ = get_versions()["full-revisionid"]
        return __git_revision__

    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


__all__ = [
    "Actor",
    "ActorFuture",
    "Adaptive",
    "BaseActorFuture",
    "CancelledError",
    "Client",
    "CompatibleExecutor",
    "CondaInstall",
    "Environ",
    "Event",
    "Future",
    "KilledWorker",
    "LocalCluster",
    "Lock",
    "MultiLock",
    "Nanny",
    "NannyPlugin",
    "InstallPlugin",
    "PipInstall",
    "Pub",
    "Queue",
    "Reschedule",
    "SSHCluster",
    "Scheduler",
    "SchedulerPlugin",
    "Security",
    "Semaphore",
    "SpecCluster",
    "Status",
    "Sub",
    "SubprocessCluster",
    "TimeoutError",
    "UploadDirectory",
    "UploadFile",
    "Variable",
    "Worker",
    "WorkerPlugin",
    "as_completed",
    "config",
    "connect",
    "dask",
    "default_client",
    "fire_and_forget",
    "futures_of",
    "get_client",
    "get_task_metadata",
    "get_task_stream",
    "get_versions",
    "get_worker",
    "local_client",
    "performance_report",
    "print",
    "progress",
    "rejoin",
    "rpc",
    "secede",
    "span",
    "sync",
    "wait",
    "warn",
    "widgets",
    "worker_client",
]