|
| 1 | +from __future__ import annotations |
| 2 | + |
| 3 | +import atexit |
| 4 | +import os |
| 5 | +import pickle |
| 6 | +import sys |
| 7 | +from collections import deque |
| 8 | +from collections.abc import Callable |
| 9 | +from textwrap import dedent |
| 10 | +from typing import Any, Final, TypeVar |
| 11 | + |
| 12 | +from . import current_time, to_thread |
| 13 | +from ._core._exceptions import BrokenWorkerIntepreter |
| 14 | +from ._core._synchronization import CapacityLimiter |
| 15 | +from .lowlevel import RunVar |
| 16 | + |
| 17 | +if sys.version_info >= (3, 11): |
| 18 | + from typing import TypeVarTuple, Unpack |
| 19 | +else: |
| 20 | + from typing_extensions import TypeVarTuple, Unpack |
| 21 | + |
# Wire-format / queue constants mirroring the ones used by the stdlib's
# subinterpreter worker (concurrent.futures); UNBOUND=2 is passed straight
# through to _interpqueues — presumably the "unbound items" policy constant
# from the private queue API (TODO: confirm against CPython's _interpqueues).
UNBOUND: Final = 2  # I have no clue how this works, but it was used in the stdlib
FMT_UNPICKLED: Final = 0  # queue item is a shareable object, no pickling needed
FMT_PICKLED: Final = 1  # queue item is a pickled bytes payload
DEFAULT_CPU_COUNT: Final = 8  # fallback when os.cpu_count() returns None (arbitrary)
MAX_WORKER_IDLE_TIME = (
    30  # seconds a subinterpreter can be idle before becoming eligible for pruning
)

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

# Per-event-loop pool of idle Worker instances (most recently used at the right end)
_idle_workers = RunVar[deque["Worker"]]("_available_workers")
# Per-event-loop default CapacityLimiter, created lazily on first use
_default_interpreter_limiter = RunVar[CapacityLimiter]("_default_interpreter_limiter")
| 35 | + |
| 36 | + |
class Worker:
    """
    Wraps a single CPython subinterpreter plus a cross-interpreter queue used to
    ship jobs in and results out.

    Instances are created lazily (``initialize`` runs on first ``_call``) and are
    pooled/pruned by :func:`run_sync` based on ``last_used``.
    """

    # Script executed inside the subinterpreter once per job: pop the pickled
    # (func, args) payload off the shared queue, run it, and push back a
    # (retval, is_exception) tuple — unpickled when shareable, pickled otherwise.
    # The names queue_id / FMT_* / UNBOUND are injected into the subinterpreter's
    # __main__ namespace by initialize() below.
    _run_func = compile(
        dedent("""
        import _interpqueues as queues
        import _interpreters as interpreters
        from pickle import loads, dumps, HIGHEST_PROTOCOL

        item = queues.get(queue_id)[0]
        try:
            func, args = loads(item)
            retval = func(*args)
        except BaseException as exc:
            is_exception = True
            retval = exc
        else:
            is_exception = False

        try:
            queues.put(queue_id, (retval, is_exception), FMT_UNPICKLED, UNBOUND)
        except interpreters.NotShareableError:
            retval = dumps(retval, HIGHEST_PROTOCOL)
            queues.put(queue_id, (retval, is_exception), FMT_PICKLED, UNBOUND)
        """),
        "<string>",
        "exec",
    )

    # current_time() timestamp of the last completed call; 0 means "never used".
    # Read by run_sync() when pruning idle workers.
    last_used: float = 0

    _initialized: bool = False  # True once the subinterpreter and queue exist
    _interpreter_id: int  # handle returned by _interpreters.create()
    _queue_id: int  # handle returned by _interpqueues.create()

    def initialize(self) -> None:
        """Create the subinterpreter and its job/result queue (idempotent callers
        guard on ``_initialized``; this method itself does not check)."""
        # _interpqueues/_interpreters are CPython-private modules (3.13+);
        # imported locally so merely importing this file doesn't require them.
        import _interpqueues as queues
        import _interpreters as interpreters

        self._interpreter_id = interpreters.create()
        self._queue_id = queues.create(2, FMT_UNPICKLED, UNBOUND)  # type: ignore[call-arg]
        self._initialized = True
        # Make the queue id and wire-format constants visible to _run_func,
        # which executes in the subinterpreter's __main__ namespace.
        interpreters.set___main___attrs(
            self._interpreter_id,
            {
                "queue_id": self._queue_id,
                "FMT_PICKLED": FMT_PICKLED,
                "FMT_UNPICKLED": FMT_UNPICKLED,
                "UNBOUND": UNBOUND,
            },
        )

    def destroy(self) -> None:
        """Tear down the subinterpreter and its queue, if they were ever created."""
        import _interpqueues as queues
        import _interpreters as interpreters

        if self._initialized:
            interpreters.destroy(self._interpreter_id)
            queues.destroy(self._queue_id)

    def _call(
        self,
        func: Callable[..., T_Retval],
        args: tuple[Any],
    ) -> tuple[Any, bool]:
        """
        Run ``func(*args)`` inside the subinterpreter (blocking; meant to be
        invoked via a worker thread).

        :return: ``(result, is_exception)`` — when ``is_exception`` is True the
            result is the exception object raised by ``func``
        :raises BrokenWorkerIntepreter: if executing the worker script itself
            failed inside the subinterpreter
        """
        import _interpqueues as queues
        import _interpreters as interpreters

        # Lazy setup on first use
        if not self._initialized:
            self.initialize()

        # Ship the job in pickled form; func and args must be picklable.
        payload = pickle.dumps((func, args), pickle.HIGHEST_PROTOCOL)
        queues.put(self._queue_id, payload, FMT_PICKLED, UNBOUND)  # type: ignore[call-arg]

        res: Any
        is_exception: bool
        # exec() returns an excinfo object on failure, None on success
        if exc_info := interpreters.exec(self._interpreter_id, self._run_func):  # type: ignore[func-returns-value,arg-type]
            raise BrokenWorkerIntepreter(exc_info)

        (res, is_exception), fmt = queues.get(self._queue_id)[:2]
        # The result comes back pickled only when it wasn't directly shareable
        if fmt == FMT_PICKLED:
            res = pickle.loads(res)

        return res, is_exception

    async def call(
        self,
        func: Callable[..., T_Retval],
        args: tuple[Any],
        limiter: CapacityLimiter,
    ) -> T_Retval:
        """
        Run ``func(*args)`` in the subinterpreter from async context, offloading
        the blocking queue/exec work to a worker thread.

        Re-raises any exception the callable raised inside the subinterpreter.
        """
        result, is_exception = await to_thread.run_sync(
            self._call,
            func,
            args,
            limiter=limiter,
        )
        if is_exception:
            raise result

        return result
| 136 | + |
| 137 | + |
| 138 | +def _stop_workers(workers: deque[Worker]) -> None: |
| 139 | + for worker in workers: |
| 140 | + worker.destroy() |
| 141 | + |
| 142 | + workers.clear() |
| 143 | + |
| 144 | + |
async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a subinterpreter.

    If the task waiting for its completion is cancelled, the call will still run its
    course but its return value (or any raised exception) will be ignored.

    .. warning:: This feature is **experimental**. The upstream interpreter API has not
        yet been finalized or thoroughly tested, so don't rely on this for anything
        mission critical.

    :param func: a callable
    :param args: positional arguments for the callable
    :param limiter: capacity limiter to use to limit the total amount of subinterpreters
        running (if omitted, the default limiter is used)
    :return: the result of the call
    :raises BrokenWorkerIntepreter: if there's an internal error in a subinterpreter

    """
    # The private _interpreters/_interpqueues modules first appeared in 3.13.
    # (sys.version_info is always a 5-tuple, so "< (3, 13)" matches the intent
    # exactly; the previous "<= (3, 13)" relied on tuple-length comparison.)
    if sys.version_info < (3, 13):
        raise RuntimeError("subinterpreters require at least Python 3.13")

    if limiter is None:
        limiter = current_default_interpreter_limiter()

    # The idle-worker pool is per event loop; create it lazily and make sure the
    # pooled subinterpreters are destroyed at interpreter shutdown.
    try:
        idle_workers = _idle_workers.get()
    except LookupError:
        idle_workers = deque()
        _idle_workers.set(idle_workers)
        atexit.register(_stop_workers, idle_workers)

    async with limiter:
        # Reuse the most recently used idle worker, or create a fresh one
        try:
            worker = idle_workers.pop()
        except IndexError:
            worker = Worker()

        try:
            return await worker.call(func, args, limiter)
        finally:
            # Prune workers that have been idle for too long; the oldest sit at
            # the left end of the deque, so stop at the first recent one.
            now = current_time()
            while idle_workers:
                if now - idle_workers[0].last_used <= MAX_WORKER_IDLE_TIME:
                    break

                await to_thread.run_sync(idle_workers.popleft().destroy, limiter=limiter)

            worker.last_used = current_time()
            idle_workers.append(worker)
| 201 | + |
| 202 | + |
def current_default_interpreter_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of
    concurrently running subinterpreters.

    Defaults to the number of CPU cores.

    :return: a capacity limiter object

    """
    # Lazily create the per-event-loop limiter on first access
    try:
        limiter = _default_interpreter_limiter.get()
    except LookupError:
        limiter = CapacityLimiter(os.cpu_count() or DEFAULT_CPU_COUNT)
        _default_interpreter_limiter.set(limiter)

    return limiter
0 commit comments