|
 from zarr.core.common import AccessModeLiteral
 
 
+# T = TypeVar("T", bound=Buffer | gpu.Buffer)
+
+
+# class _MemoryStore
+
+
 # TODO: this store could easily be extended to wrap any MutableMapping store from v2
 # When that is done, the `MemoryStore` will just be a store that wraps a dict.
 class MemoryStore(Store):
@@ -161,19 +167,23 @@ class GpuMemoryStore(MemoryStore): |
     of the original location. This guarantees that chunks will always be in GPU
     memory for downstream processing. For location agnostic use cases, it would
     be better to use `MemoryStore` instead.
+
+    Parameters
+    ----------
+    store_dict : MutableMapping, optional
+        A mutable mapping with string keys and :class:`zarr.core.buffer.gpu.Buffer`
+        values.
     """
 
     _store_dict: MutableMapping[str, Buffer]
 
     def __init__(
         self,
-        store_dict: MutableMapping[str, Buffer] | None = None,
+        store_dict: MutableMapping[str, gpu.Buffer] | None = None,
         *,
         mode: AccessModeLiteral = "r",
     ) -> None:
-        super().__init__(mode=mode)
-        if store_dict:
-            self._store_dict = {k: gpu.Buffer.from_buffer(store_dict[k]) for k in iter(store_dict)}
+        super().__init__(store_dict=store_dict, mode=mode)  # type: ignore[arg-type]
 
     def __str__(self) -> str:
         return f"gpumemory://{id(self._store_dict)}"
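For context, a minimal usage sketch of the constructor as changed above. This is an illustration, not part of the diff: the import path for GpuMemoryStore is an assumption and may differ between zarr-python revisions, and populating the store with gpu.Buffer values requires a CUDA-capable environment.

# Minimal sketch (not part of this diff). The module path below is assumed.
from zarr.storage import GpuMemoryStore

# Create an empty GPU-backed in-memory store opened for writing. With this
# change, any store_dict passed in is expected to already map str keys to
# gpu.Buffer values; it is handed to MemoryStore.__init__ unchanged rather
# than being converted with gpu.Buffer.from_buffer as before.
store = GpuMemoryStore(mode="w")
print(store)  # prints "gpumemory://<id of the backing dict>" per __str__ above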
|