# The code below is mostly my own but based on the interfaces of the
# curio library by David Beazley. I'm considering switching to using
# curio.  In the meantime this is an attempt to provide a similarly
# clean, pure-async interface and to move away from direct
# framework-specific dependencies.  As asyncio differs in its design,
# it is not possible to provide identical semantics.
#
# The curio library is distributed under the following licence:
#
# Copyright (C) 2015-2017
# David Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from asyncio import (
CancelledError, get_event_loop, Queue, Event, Lock, Semaphore, sleep
)
from collections import deque
from contextlib import suppress
from functools import partial
import logging
import sys
from aiorpcx.util import instantiate_coroutine, check_task
__all__ = (
'Queue', 'Event', 'Lock', 'Semaphore', 'sleep', 'CancelledError',
'run_in_thread', 'spawn', 'spawn_sync', 'TaskGroup', 'NoRemainingTasksError',
'TaskTimeout', 'TimeoutCancellationError', 'UncaughtTimeoutError',
'timeout_after', 'timeout_at', 'ignore_after', 'ignore_at',
)
if sys.version_info >= (3, 7):
from asyncio import current_task
else:
from asyncio import Task
current_task = Task.current_task
async def run_in_thread(func, *args):
'''Run a function in a separate thread, and await its completion.'''
return await get_event_loop().run_in_executor(None, func, *args)
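# Hedged usage sketch (comments only, not executed): run_in_thread()
# keeps blocking calls off the event loop.  read_file() and the file
# name below are hypothetical.
#
#     def read_file(name):
#         with open(name, 'rb') as f:
#             return f.read()
#
#     async def caller():
#         data = await run_in_thread(read_file, 'blob.bin')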
async def spawn(coro, *args, loop=None, report_crash=True):
return spawn_sync(coro, *args, loop=loop, report_crash=report_crash)
def spawn_sync(coro, *args, loop=None, report_crash=True):
coro = instantiate_coroutine(coro, args)
loop = loop or get_event_loop()
task = loop.create_task(coro)
if report_crash:
task.add_done_callback(partial(check_task, logging))
return task
class NoRemainingTasksError(RuntimeError):
pass
class TaskGroup(object):
    '''A class representing a group of executing tasks.

    tasks is an optional set of existing tasks to put into the group.
    New tasks can later be added using the spawn() method below.
    wait specifies the policy used for waiting for tasks; see the
    join() method below.

    Each TaskGroup is an independent entity.  Task groups do not form
    a hierarchy or any kind of relationship to other previously
    created task groups or tasks.  Moreover, tasks created by the
    top-level spawn() function are not placed into any task group.
    To create a task in a group, create it with TaskGroup.spawn() or
    add an existing task with TaskGroup.add_task().

    The completed attribute holds the first task in the group that
    completed with a result, taking into account the wait policy
    passed to the constructor.
    '''
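    # Hedged usage sketch (comments only, not executed): a TaskGroup is
    # normally used as an async context manager, so join() runs on
    # exit.  The fetch() coroutine and its arguments are hypothetical.
    #
    #     async with TaskGroup() as group:
    #         await group.spawn(fetch, 'a')
    #         await group.spawn(fetch, 'b')
    #     # join() has now applied the wait policy; group.completed is
    #     # the first task that finished with a result.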
def __init__(self, tasks=(), *, wait=all):
if wait not in (any, all, object):
raise ValueError('invalid wait argument')
self._done = deque()
self._pending = set()
self._wait = wait
self._done_event = Event()
self._logger = logging.getLogger(self.__class__.__name__)
self._closed = False
self.completed = None
for task in tasks:
self._add_task(task)
def _add_task(self, task):
'''Add an already existing task to the task group.'''
if hasattr(task, '_task_group'):
raise RuntimeError('task is already part of a group')
if self._closed:
raise RuntimeError('task group is closed')
task._task_group = self
if task.done():
self._done.append(task)
else:
self._pending.add(task)
task.add_done_callback(self._on_done)
def _on_done(self, task):
task._task_group = None
self._pending.remove(task)
self._done.append(task)
self._done_event.set()
if self.completed is None:
if not task.cancelled() and not task.exception():
if self._wait is object and task.result() is None:
pass
else:
self.completed = task
async def spawn(self, coro, *args):
'''Create a new task that’s part of the group. Returns a Task
instance.
'''
task = await spawn(coro, *args, report_crash=False)
self._add_task(task)
return task
async def add_task(self, task):
'''Add an already existing task to the task group.'''
self._add_task(task)
async def next_done(self):
'''Returns the next completed task. Returns None if no more tasks
remain. A TaskGroup may also be used as an asynchronous iterator.
'''
if not self._done and self._pending:
self._done_event.clear()
await self._done_event.wait()
if self._done:
return self._done.popleft()
return None
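    # Illustrative sketch: iterating the group yields tasks from
    # next_done() as they finish.  worker() is hypothetical.
    #
    #     async with TaskGroup() as group:
    #         for n in range(3):
    #             await group.spawn(worker, n)
    #         async for task in group:
    #             print('finished:', task.result())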
async def next_result(self):
'''Returns the result of the next completed task. If the task failed
with an exception, that exception is raised. A RuntimeError
exception is raised if this is called when no remaining tasks
are available.'''
task = await self.next_done()
if not task:
raise NoRemainingTasksError('no tasks remain')
return task.result()
async def join(self):
        '''Wait for tasks in the group to terminate according to the
        wait policy for the group.

        If the join() operation itself is cancelled, all remaining
        tasks in the group are also cancelled.

        If a TaskGroup is used as a context manager, the join() method
        is called on context exit.

        Once join() returns, no more tasks may be added to the task
        group; tasks can still be added while join() is running.
        '''
def errored(task):
return not task.cancelled() and task.exception()
try:
if self._wait in (all, object):
while True:
task = await self.next_done()
if task is None:
return
if errored(task):
break
if self._wait is object:
if task.cancelled() or task.result() is not None:
return
else: # any
task = await self.next_done()
if task is None or not errored(task):
return
finally:
await self.cancel_remaining()
if errored(task):
raise task.exception()
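    # Sketch of the wait policies described above: wait=all waits for
    # every task, wait=any for the first completion, and wait=object
    # for the first non-None result; a task that errors ends the wait
    # early in every mode, and join() then cancels whatever is still
    # pending.  The query() coroutine below is hypothetical.
    #
    #     async with TaskGroup(wait=any) as group:
    #         await group.spawn(query, 'server1')
    #         await group.spawn(query, 'server2')
    #     winner = group.completed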
async def cancel_remaining(self):
'''Cancel all remaining tasks.'''
self._closed = True
task_list = list(self._pending)
for task in task_list:
task.cancel()
for task in task_list:
with suppress(CancelledError):
await task
def closed(self):
return self._closed
def __aiter__(self):
return self
async def __anext__(self):
task = await self.next_done()
if task:
return task
raise StopAsyncIteration
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type:
await self.cancel_remaining()
else:
await self.join()
class TaskTimeout(CancelledError):
def __init__(self, secs):
self.secs = secs
def __str__(self):
        return f'task timed out after {self.secs}s'
class TimeoutCancellationError(CancelledError):
pass
class UncaughtTimeoutError(Exception):
pass
def _set_new_deadline(task, deadline):
def timeout_task():
# Unfortunately task.cancel is all we can do with asyncio
task.cancel()
task._timed_out = deadline
task._deadline_handle = task._loop.call_at(deadline, timeout_task)
def _set_task_deadline(task, deadline):
deadlines = getattr(task, '_deadlines', [])
if deadlines:
if deadline < min(deadlines):
task._deadline_handle.cancel()
_set_new_deadline(task, deadline)
else:
_set_new_deadline(task, deadline)
deadlines.append(deadline)
task._deadlines = deadlines
task._timed_out = None
def _unset_task_deadline(task):
deadlines = task._deadlines
timed_out_deadline = task._timed_out
uncaught = timed_out_deadline not in deadlines
task._deadline_handle.cancel()
deadlines.pop()
if deadlines:
_set_new_deadline(task, min(deadlines))
return timed_out_deadline, uncaught
class TimeoutAfter(object):
def __init__(self, deadline, *, ignore=False, absolute=False):
self._deadline = deadline
self._ignore = ignore
self._absolute = absolute
self.expired = False
async def __aenter__(self):
task = current_task()
loop_time = task._loop.time()
if self._absolute:
self._secs = self._deadline - loop_time
else:
self._secs = self._deadline
self._deadline += loop_time
_set_task_deadline(task, self._deadline)
self.expired = False
self._task = task
return self
async def __aexit__(self, exc_type, exc_value, traceback):
timed_out_deadline, uncaught = _unset_task_deadline(self._task)
if exc_type not in (CancelledError, TaskTimeout,
TimeoutCancellationError):
return False
if timed_out_deadline == self._deadline:
self.expired = True
if self._ignore:
return True
raise TaskTimeout(self._secs) from None
if timed_out_deadline is None:
return False
if uncaught:
raise UncaughtTimeoutError('uncaught timeout received')
if exc_type is TimeoutCancellationError:
return False
raise TimeoutCancellationError(timed_out_deadline) from None
async def _timeout_after_func(seconds, absolute, coro, args):
coro = instantiate_coroutine(coro, args)
async with TimeoutAfter(seconds, absolute=absolute):
return await coro
def timeout_after(seconds, coro=None, *args):
'''Execute the specified coroutine and return its result. However,
issue a cancellation request to the calling task after seconds
have elapsed. When this happens, a TaskTimeout exception is
raised. If coro is None, the result of this function serves
as an asynchronous context manager that applies a timeout to a
block of statements.

    timeout_after() may be composed with other timeout_after()
    operations (i.e., nested timeouts).  If an outer timeout expires
    first, then TimeoutCancellationError is raised instead of
    TaskTimeout.  If an inner timeout expires and its TaskTimeout is
    left uncaught, an UncaughtTimeoutError is raised in the outer
    timeout.
'''
if coro:
return _timeout_after_func(seconds, False, coro, args)
return TimeoutAfter(seconds)
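# Hedged examples of the two calling conventions documented above.
# fetch() and the URL are hypothetical placeholders.
#
#     # Apply a timeout to a single coroutine:
#     result = await timeout_after(5, fetch, 'https://example.invalid/')
#
#     # Or to a block of statements, handling expiry explicitly:
#     try:
#         async with timeout_after(5):
#             result = await fetch('https://example.invalid/')
#     except TaskTimeout:
#         result = None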
def timeout_at(clock, coro=None, *args):
    '''Execute the specified coroutine and return its result. However,
    issue a cancellation request to the calling task once the event
    loop's clock reaches the absolute time given by clock.  When this
    happens, a TaskTimeout exception is raised.  If coro is None, the
    result of this function serves as an asynchronous context manager
    that applies a timeout to a block of statements.

    timeout_at() may be composed with timeout_after() and other
    timeout_at() operations (i.e., nested timeouts).  If an outer
    timeout expires first, then TimeoutCancellationError is raised
    instead of TaskTimeout.  If an inner timeout expires and its
    TaskTimeout is left uncaught, an UncaughtTimeoutError is raised
    in the outer timeout.
    '''
if coro:
return _timeout_after_func(clock, True, coro, args)
return TimeoutAfter(clock, absolute=True)
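# Sketch of timeout_at(): the deadline is an absolute value on the
# event loop's clock (loop.time()), not a duration.  fetch() is
# hypothetical.
#
#     deadline = get_event_loop().time() + 5
#     async with timeout_at(deadline):
#         await fetch('https://example.invalid/')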
async def _ignore_after_func(seconds, absolute, coro, args, timeout_result):
coro = instantiate_coroutine(coro, args)
async with TimeoutAfter(seconds, absolute=absolute, ignore=True):
return await coro
return timeout_result
def ignore_after(seconds, coro=None, *args, timeout_result=None):
'''Execute the specified coroutine and return its result. Issue a
cancellation request after seconds have elapsed. When a timeout
occurs, no exception is raised. Instead, timeout_result is
returned.
If coro is None, the result is an asynchronous context manager
that applies a timeout to a block of statements. For the context
manager case, the resulting context manager object has an expired
attribute set to True if time expired.
Note: ignore_after() may also be composed with other timeout
operations. TimeoutCancellationError and UncaughtTimeoutError
exceptions might be raised according to the same rules as for
timeout_after().
'''
if coro:
return _ignore_after_func(seconds, False, coro, args, timeout_result)
return TimeoutAfter(seconds, ignore=True)
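# Sketch of ignore_after(): on expiry no exception escapes.  The
# direct call returns timeout_result; the context-manager form sets
# the expired attribute instead.  fetch() is hypothetical.
#
#     data = await ignore_after(5, fetch, 'https://example.invalid/',
#                               timeout_result=b'')
#
#     async with ignore_after(5) as timed:
#         data = await fetch('https://example.invalid/')
#     if timed.expired:
#         data = b''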
def ignore_at(clock, coro=None, *args, timeout_result=None):
'''
Stop the enclosed task or block of code at an absolute
clock value. Same usage as ignore_after().
'''
if coro:
return _ignore_after_func(clock, True, coro, args, timeout_result)
return TimeoutAfter(clock, absolute=True, ignore=True)