Commits
34 commits
49d5ee8
First working version of Zstd codec on the GPU
akshaysubr Feb 25, 2025
d548adc
Adding nvcomp to the GPU dependency list
akshaysubr Feb 25, 2025
a8c0db3
Updating codec pipeline batch size for GPU codecs to enable parallelism
akshaysubr Feb 27, 2025
69aa274
Making encode and decode async
akshaysubr Jul 3, 2025
10e1bc9
Removing custom awaitable in favor of event synchronize in an async t…
akshaysubr Jul 4, 2025
771c0c1
Merge remote-tracking branch 'upstream/main' into gpu-codecs
TomAugspurger Jul 9, 2025
ec07100
Sync convert methods
TomAugspurger Jul 9, 2025
d1c37a3
test coverage
TomAugspurger Jul 9, 2025
69ea74e
loosen dtype restriction
TomAugspurger Jul 9, 2025
1b85fdc
fixed Buffer.__add__
TomAugspurger Jul 9, 2025
f5c7814
Added whatsnew
TomAugspurger Jul 11, 2025
7671274
Merge remote-tracking branch 'upstream/main' into gpu-codecs
TomAugspurger Jul 11, 2025
d558ef8
Merge remote-tracking branch 'upstream/main' into gpu-codecs
TomAugspurger Jul 14, 2025
f16d730
Look up the codec implementation
TomAugspurger Jul 14, 2025
f0db57d
Merge branch 'main' into gpu-codecs
TomAugspurger Jul 18, 2025
048ad48
test coverage
TomAugspurger Jul 18, 2025
2282cb9
Test coverage for uninitialized chunks
TomAugspurger Jul 18, 2025
c6460b5
coverage
TomAugspurger Jul 18, 2025
3b5e294
doc update
TomAugspurger Jul 18, 2025
dd825dc
lint
TomAugspurger Jul 18, 2025
f89b232
@gpu_test
TomAugspurger Jul 18, 2025
7a4b037
wip test stuff
TomAugspurger Jul 21, 2025
398b4d1
doc updates
TomAugspurger Jul 23, 2025
dd69543
added failing compatibility test
TomAugspurger Jul 23, 2025
76f7560
added a matching test
TomAugspurger Jul 24, 2025
8b5b3f1
Some buffer coverage
TomAugspurger Jul 24, 2025
7af3a16
coverage
TomAugspurger Jul 24, 2025
996fbc0
update error message
TomAugspurger Jul 28, 2025
d24d027
private
TomAugspurger Jul 28, 2025
090349c
Merge remote-tracking branch 'upstream/main' into gpu-codecs
TomAugspurger Jul 28, 2025
83c53b0
Merge remote-tracking branch 'upstream/main' into gpu-codecs
TomAugspurger Sep 29, 2025
eb50521
test fixup
TomAugspurger Sep 29, 2025
de3b577
doc fix
TomAugspurger Sep 29, 2025
ac14838
doc fix
TomAugspurger Sep 29, 2025
1 change: 1 addition & 0 deletions pyproject.toml
@@ -67,6 +67,7 @@ remote = [
]
gpu = [
"cupy-cuda12x",
"nvidia-nvcomp-cu12",
]
# Development extras
test = [
2 changes: 2 additions & 0 deletions src/zarr/codecs/__init__.py
@@ -3,6 +3,7 @@
from zarr.codecs.blosc import BloscCname, BloscCodec, BloscShuffle
from zarr.codecs.bytes import BytesCodec, Endian
from zarr.codecs.crc32c_ import Crc32cCodec
from zarr.codecs.gpu import NvcompZstdCodec
from zarr.codecs.gzip import GzipCodec
from zarr.codecs.sharding import ShardingCodec, ShardingCodecIndexLocation
from zarr.codecs.transpose import TransposeCodec
@@ -17,6 +18,7 @@
"Crc32cCodec",
"Endian",
"GzipCodec",
"NvcompZstdCodec",
"ShardingCodec",
"ShardingCodecIndexLocation",
"TransposeCodec",
195 changes: 195 additions & 0 deletions src/zarr/codecs/gpu.py
@@ -0,0 +1,195 @@
from __future__ import annotations

import asyncio
from collections.abc import Awaitable
from dataclasses import dataclass
from functools import cached_property
from typing import TYPE_CHECKING

import numpy as np

from zarr.abc.codec import BytesBytesCodec
from zarr.core.common import JSON, parse_named_configuration
from zarr.registry import register_codec

if TYPE_CHECKING:
from collections.abc import Generator, Iterable
from typing import Any, Self

from zarr.core.array_spec import ArraySpec
from zarr.core.buffer import Buffer

try:
import cupy as cp
except ImportError:
cp = None

try:
from nvidia import nvcomp
except ImportError:
nvcomp = None


class AsyncCUDAEvent(Awaitable[None]):
"""An awaitable wrapper around a CuPy CUDA event for asynchronous waiting."""

def __init__(
self, event: cp.cuda.Event, initial_delay: float = 0.001, max_delay: float = 0.1
) -> None:
"""
Initialize the async CUDA event.
Args:
event (cp.cuda.Event): The CuPy CUDA event to wait on.
initial_delay (float): Initial polling delay in seconds (default: 0.001s).
max_delay (float): Maximum polling delay in seconds (default: 0.1s).
"""
self.event = event
self.initial_delay = initial_delay
self.max_delay = max_delay

def __await__(self) -> Generator[Any, None, None]:
"""Makes the event awaitable by yielding control until the event is complete."""
return self._wait().__await__()

async def _wait(self) -> None:
"""Polls the CUDA event asynchronously with exponential backoff until it completes."""
delay = self.initial_delay
while not self.event.query(): # `query()` returns True if the event is complete
await asyncio.sleep(delay) # Yield control to other async tasks
delay = min(delay * 2, self.max_delay) # Exponential backoff


def parse_zstd_level(data: JSON) -> int:
if isinstance(data, int):
if data >= 23:
raise ValueError(f"Value must be less than or equal to 22. Got {data} instead.")
return data
raise TypeError(f"Got value with type {type(data)}, but expected an int.")


def parse_checksum(data: JSON) -> bool:
if isinstance(data, bool):
return data
raise TypeError(f"Expected bool. Got {type(data)}.")


@dataclass(frozen=True)
class NvcompZstdCodec(BytesBytesCodec):
is_fixed_size = True

level: int = 0
checksum: bool = False

def __init__(self, *, level: int = 0, checksum: bool = False) -> None:
# TODO: Set CUDA device appropriately here and also set CUDA stream
Review comment (Contributor):

Agreed with leaving devices / streams as a TODO for now.

I want to enable users to overlap host-to-device memcpys with compute operations (like decode, but their own compute operations as well), but I'm not sure yet what that API will look like.

If you have any thoughts on how best to do this I'd love to hear them, and write them up as an issue.

Review comment (Contributor):

#3271 for planning on devices and streams.

(See the sketch after this file's diff for one possible overlap pattern.)


level_parsed = parse_zstd_level(level)
checksum_parsed = parse_checksum(checksum)

object.__setattr__(self, "level", level_parsed)
object.__setattr__(self, "checksum", checksum_parsed)

@classmethod
def from_dict(cls, data: dict[str, JSON]) -> Self:
_, configuration_parsed = parse_named_configuration(data, "zstd")
return cls(**configuration_parsed) # type: ignore[arg-type]

def to_dict(self) -> dict[str, JSON]:
return {
"name": "zstd",
"configuration": {"level": self.level, "checksum": self.checksum},
}

@cached_property
def _zstd_codec(self) -> nvcomp.Codec:
# config_dict = {algorithm = "Zstd", "level": self.level, "checksum": self.checksum}
# return Zstd.from_config(config_dict)
device = cp.cuda.Device() # Select the current default device
stream = cp.cuda.get_current_stream() # Use the current default stream
return nvcomp.Codec(
algorithm="Zstd",
bitstream_kind=nvcomp.BitstreamKind.RAW,
device_id=device.id,
cuda_stream=stream.ptr,
)

async def _convert_to_nvcomp_arrays(
self,
chunks_and_specs: Iterable[tuple[Buffer | None, ArraySpec]],
) -> tuple[list[nvcomp.Array], list[int]]:
none_indices = [i for i, (b, _) in enumerate(chunks_and_specs) if b is None]
filtered_inputs = [b.as_array_like() for b, _ in chunks_and_specs if b is not None]
# TODO: add CUDA stream here
return nvcomp.as_arrays(filtered_inputs), none_indices

async def _convert_from_nvcomp_arrays(
self,
arrays: Iterable[nvcomp.Array],
chunks_and_specs: Iterable[tuple[Buffer | None, ArraySpec]],
) -> Iterable[Buffer | None]:
return [
spec.prototype.buffer.from_array_like(cp.asarray(a, dtype=np.dtype("b"))) if a else None
for a, (_, spec) in zip(arrays, chunks_and_specs, strict=True)
]

async def decode(
self,
chunks_and_specs: Iterable[tuple[Buffer | None, ArraySpec]],
) -> Iterable[Buffer | None]:
"""Decodes a batch of chunks.
Chunks can be None in which case they are ignored by the codec.
Parameters
----------
chunks_and_specs : Iterable[tuple[Buffer | None, ArraySpec]]
Ordered set of encoded chunks with their accompanying chunk spec.
Returns
-------
Iterable[Buffer | None]
"""
chunks_and_specs = list(chunks_and_specs)

# Convert to nvcomp arrays
filtered_inputs, none_indices = await self._convert_to_nvcomp_arrays(chunks_and_specs)

outputs = self._zstd_codec.decode(filtered_inputs) if len(filtered_inputs) > 0 else []
for index in none_indices:
outputs.insert(index, None)

return await self._convert_from_nvcomp_arrays(outputs, chunks_and_specs)

async def encode(
self,
chunks_and_specs: Iterable[tuple[Buffer | None, ArraySpec]],
) -> Iterable[Buffer | None]:
"""Encodes a batch of chunks.
Chunks can be None in which case they are ignored by the codec.
Parameters
----------
chunks_and_specs : Iterable[tuple[Buffer | None, ArraySpec]]
Ordered set of to-be-encoded chunks with their accompanying chunk spec.
Returns
-------
Iterable[Buffer | None]
"""
# TODO: Make this actually async
chunks_and_specs = list(chunks_and_specs)

# Convert to nvcomp arrays
filtered_inputs, none_indices = await self._convert_to_nvcomp_arrays(chunks_and_specs)

outputs = self._zstd_codec.encode(filtered_inputs) if len(filtered_inputs) > 0 else []
for index in none_indices:
outputs.insert(index, None)

return await self._convert_from_nvcomp_arrays(outputs, chunks_and_specs)

def compute_encoded_size(self, _input_byte_length: int, _chunk_spec: ArraySpec) -> int:
raise NotImplementedError


register_codec("zstd", NvcompZstdCodec)
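
Following up on the review thread above about overlapping host-to-device copies with compute: the snippet below is a rough sketch of one possible pattern using plain CuPy streams, not part of this PR. The helper name h2d_async and the dedicated non-blocking copy stream are illustrative assumptions, and true overlap additionally requires pinned host memory.

import cupy as cp
import numpy as np

# Dedicated copy stream so host-to-device transfers can run alongside
# compute (e.g. decode) issued on the default stream.
copy_stream = cp.cuda.Stream(non_blocking=True)

def h2d_async(host_chunk: np.ndarray) -> cp.ndarray:
    # Allocate the device buffer, then launch the copy on the copy stream.
    # The transfer only truly overlaps if host_chunk lives in pinned memory.
    dev_chunk = cp.empty_like(host_chunk)
    dev_chunk.set(host_chunk, stream=copy_stream)
    return dev_chunk

# Before decoding, make the compute (default) stream wait for the copies.
cp.cuda.get_current_stream().wait_event(copy_stream.record())
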
25 changes: 21 additions & 4 deletions src/zarr/core/config.py
@@ -64,7 +64,15 @@ def enable_gpu(self) -> ConfigSet:
Configure Zarr to use GPUs where possible.
"""
return self.set(
{"buffer": "zarr.core.buffer.gpu.Buffer", "ndbuffer": "zarr.core.buffer.gpu.NDBuffer"}
{
"buffer": "zarr.core.buffer.gpu.Buffer",
"ndbuffer": "zarr.core.buffer.gpu.NDBuffer",
"codecs": {"zstd": "zarr.codecs.gpu.NvcompZstdCodec"},
"codec_pipeline": {
"path": "zarr.core.codec_pipeline.BatchedCodecPipeline",
"batch_size": 65536,
},
}
)


@@ -96,13 +104,22 @@ def enable_gpu(self) -> ConfigSet:
},
"v3_default_compressors": {
"numeric": [
{"name": "zstd", "configuration": {"level": 0, "checksum": False}},
{
"name": "zstd",
"configuration": {"level": 0, "checksum": False},
},
],
"string": [
{"name": "zstd", "configuration": {"level": 0, "checksum": False}},
{
"name": "zstd",
"configuration": {"level": 0, "checksum": False},
},
],
"bytes": [
{"name": "zstd", "configuration": {"level": 0, "checksum": False}},
{
"name": "zstd",
"configuration": {"level": 0, "checksum": False},
},
],
},
},
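
A quick, illustrative check of the new defaults (not part of the diff), assuming the dotted-key config.get accessor from donfig behaves as it does elsewhere in zarr:

import zarr

with zarr.config.enable_gpu():
    # "zstd" now resolves to the nvCOMP-backed codec, and the batched
    # codec pipeline uses a large batch size to expose GPU parallelism.
    assert zarr.config.get("codecs.zstd") == "zarr.codecs.gpu.NvcompZstdCodec"
    assert zarr.config.get("codec_pipeline.batch_size") == 65536
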
7 changes: 5 additions & 2 deletions tests/test_api.py
@@ -10,6 +10,7 @@
import zarr.api.asynchronous
import zarr.core.group
from zarr import Array, Group
from zarr.abc.codec import Codec
from zarr.abc.store import Store
from zarr.api.synchronous import (
create,
@@ -23,6 +24,7 @@
save_array,
save_group,
)
from zarr.codecs import NvcompZstdCodec
from zarr.core.common import JSON, MemoryOrder, ZarrFormat
from zarr.errors import MetadataValidationError
from zarr.storage import MemoryStore
@@ -1131,15 +1133,16 @@ def test_open_array_with_mode_r_plus(store: Store) -> None:
indirect=True,
)
@pytest.mark.parametrize("zarr_format", [None, 2, 3])
def test_gpu_basic(store: Store, zarr_format: ZarrFormat | None) -> None:
@pytest.mark.parametrize("codec", ["auto", NvcompZstdCodec()])
def test_gpu_basic(store: Store, zarr_format: ZarrFormat | None, codec: str | Codec) -> None:
import cupy as cp

if zarr_format == 2:
# Without this, the zstd codec attempts to convert the cupy
# array to bytes.
compressors = None
else:
compressors = "auto"
compressors = codec

with zarr.config.enable_gpu():
src = cp.random.uniform(size=(100, 100)) # allocate on the device
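
For reference, a minimal end-to-end sketch of the pattern this test exercises (not part of the diff); it assumes a CUDA-capable environment with the gpu extra installed, and uses MemoryStore and zarr.create_array purely for illustration:

import cupy as cp
import zarr
from zarr.codecs import NvcompZstdCodec
from zarr.storage import MemoryStore

with zarr.config.enable_gpu():
    src = cp.random.uniform(size=(100, 100))  # allocate on the device
    z = zarr.create_array(
        MemoryStore(),
        shape=src.shape,
        chunks=(10, 10),
        dtype=src.dtype,
        compressors=NvcompZstdCodec(),  # GPU Zstd via nvCOMP (zarr v3 format)
    )
    z[:] = src  # chunks are compressed on the GPU
    out = z[:]  # read back as a GPU-backed array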