|
17 | 17 | import asyncio |
18 | 18 | from collections.abc import Callable, Coroutine, Hashable |
19 | 19 | from functools import partial |
20 | | -from typing import Any, ParamSpec, TypeVar |
| 20 | +from typing import Any, Generic, ParamSpec, TypeVar |
21 | 21 |
|
22 | 22 | from ._cpython_stuff import make_key |
23 | 23 |
|
24 | | -__all__ = ("taskcache",) |
| 24 | +__all__ = ("taskcache", "LRU", "lrutaskcache") |
25 | 25 |
|
26 | 26 |
|
27 | 27 | P = ParamSpec("P") |
28 | 28 | T = TypeVar("T") |
| 29 | +K = TypeVar("K") |
| 30 | +V = TypeVar("V") |
| 31 | + |
| 32 | + |
| 33 | +class LRU(Generic[K, V]): |
| 34 | + def __init__(self, maxsize: int, /): |
| 35 | + self.cache: dict[K, V] = {} |
| 36 | + self.maxsize = maxsize |
| 37 | + |
| 38 | + def get(self, key: K, default: T, /) -> V | T: |
| 39 | + if key not in self.cache: |
| 40 | + return default |
| 41 | + self.cache[key] = self.cache.pop(key) |
| 42 | + return self.cache[key] |
| 43 | + |
| 44 | + def __getitem__(self, key: K, /) -> V: |
| 45 | + self.cache[key] = self.cache.pop(key) |
| 46 | + return self.cache[key] |
| 47 | + |
| 48 | + def __setitem__(self, key: K, value: V, /): |
| 49 | + self.cache[key] = value |
| 50 | + if len(self.cache) > self.maxsize: |
| 51 | + self.cache.pop(next(iter(self.cache))) |
| 52 | + |
| 53 | + def remove(self, key: K) -> None: |
| 54 | + self.cache.pop(key, None) |
29 | 55 |
|
30 | 56 |
|
31 | 57 | def taskcache( |
@@ -66,3 +92,46 @@ def wrapped(*args: P.args, **kwargs: P.kwargs) -> asyncio.Task[T]: |
66 | 92 | return wrapped |
67 | 93 |
|
68 | 94 | return wrapper |
| 95 | + |
| 96 | + |
def lrutaskcache(
    ttl: float | None = None, maxsize: int = 1024
) -> Callable[[Callable[P, Coroutine[Any, Any, T]]], Callable[P, asyncio.Task[T]]]:
    """Decorator to modify coroutine functions to instead act as functions returning cached tasks.

    For general use, this leaves the end user API largely the same,
    while leveraging tasks to allow preemptive caching.

    Note: This uses the args and kwargs of the original coroutine function as a cache key.
    This includes instances (self) when wrapping methods.
    Consider not wrapping instance methods, but what those methods call when feasible in cases where this may matter.

    The ordering of args and kwargs matters.

    tasks are evicted by LRU and ttl.
    """

    def wrapper(coro: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, asyncio.Task[T]]:
        internal_cache: LRU[Hashable, asyncio.Task[T]] = LRU(maxsize)

        def wrapped(*args: P.args, **kwargs: P.kwargs) -> asyncio.Task[T]:
            key = make_key(args, kwargs)
            try:
                return internal_cache[key]
            except KeyError:
                internal_cache[key] = task = asyncio.create_task(coro(*args, **kwargs))
                if ttl is not None:
                    loop = asyncio.get_running_loop()

                    # Bug fix: add_done_callback invokes its callback with the
                    # finished task as a positional argument, so the previous
                    # partial(loop.call_later, ttl, internal_cache.remove, key)
                    # ended up scheduling internal_cache.remove(key, task) ->
                    # TypeError (swallowed by the loop's exception handler),
                    # and the entry was never evicted by ttl. Discard the task
                    # argument here, and bind key via a default to avoid the
                    # lambda late-binding pitfall.
                    def _evict_later(_done: asyncio.Task[T], *, _key: Hashable = key) -> None:
                        # Evict ttl seconds after the task completes.
                        loop.call_later(ttl, internal_cache.remove, _key)

                    task.add_done_callback(_evict_later)
                return task

        return wrapped

    return wrapper
0 commit comments