-# Rechunk a collection of necdf files on s3 into a single zarr store.
+# Rechunk a virtual zarr on s3 into a single zarr store using xarray-cubed.
 #
-# First, lithops and Virtualizarr construct a virtual dataset comprised of the
-# netcdf files on s3. Then, xarray-cubed rechunks the virtual dataset into a
-# zarr.
+# Prior to running this script, create the virtual zarr with
+# > python create-virtualzarr.py
 #
-# Inspired by Pythia's cookbook: https://projectpythia.org/kerchunk-cookbook
-# by norlandrhagen.
-#
-# Please, contribute improvements.
+# NOTE: In jupyter, open_dataset seems to cache the json, such that changes
+# aren't propagated until the kernel is restarted.

-import fsspec
-import lithops
+import os
 import xarray as xr

-from virtualizarr import open_virtual_dataset
-
-fs_read = fsspec.filesystem("s3", anon=True, skip_instance_cache=True)
-files_paths = fs_read.glob("s3://wrf-se-ak-ar5/ccsm/rcp85/daily/2060/*")
-file_pattern = sorted(["s3://" + f for f in files_paths])
-
-# truncate file_pattern while debugging
-file_pattern = file_pattern[:4]
-
-print(f"{len(file_pattern)} file paths were retrieved.")
-
-
-def map_references(fil):
-    """ Map function to open virtual datasets.
-    """
-    vds = open_virtual_dataset(fil,
-                               indexes={},
-                               loadable_variables=['Time'],
-                               cftime_variables=['Time'],
-                               )
-    return vds
-
-
-def reduce_references(results):
-    """ Reduce to concat virtual datasets.
+bucket_url = os.getenv("BUCKET_URL")

-    """
-    combined_vds = xr.combine_nested(
-        results,
-        concat_dim=["Time"],
-        coords="minimal",
-        compat="override",
-    )
-    # possibly write parquet to s3 here
-    return combined_vds
-
-
-fexec = lithops.FunctionExecutor(config_file="lithops.yaml")
-
-futures = fexec.map_reduce(
-    map_references,
-    file_pattern,
-    reduce_references,
-    spawn_reducer=100,
+combined_ds = xr.open_dataset(
+    f"{bucket_url}/combined.json",  # location must be accessible to workers
+    engine="kerchunk",
+    chunks={},
+    chunked_array_type="cubed",
 )

-ds = futures.get_result()
-ds.virtualize.to_kerchunk("combined.json", format="json")
-
-# NOTE: In jupyter, open_dataset seems to cache the json, such that changes
-# aren't propogated until the kernel is restarted.
-combined_ds = xr.open_dataset("combined.json",
-                              engine="kerchunk",
-                              chunks={},
-                              chunked_array_type="cubed",
-                              )
-
-combined_ds['Time'].attrs = {}  # to_zarr complains about attrs
+combined_ds['Time'].attrs = {}  # otherwise to_zarr complains about attrs

 rechunked_ds = combined_ds.chunk(
     chunks={'Time': 5, 'south_north': 25, 'west_east': 32},
     chunked_array_type="cubed",
 )

-rechunked_ds.to_zarr("rechunked.zarr",
-                     mode="w",
-                     encoding={},  # TODO
-                     consolidated=True,
-                     safe_chunks=False,
-                     )
+rechunked_ds.to_zarr(
+    f"{bucket_url}/rechunked.zarr",
+    mode="w",
+    encoding={},  # TODO
+    consolidated=True,
+    safe_chunks=False,
+)
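The new header comment points to create-virtualzarr.py, which is not part of this diff. As a rough orientation only, here is a minimal sketch of what that helper could look like, assuming it reuses the lithops/virtualizarr map-reduce removed above and uploads the kerchunk references to the same BUCKET_URL; the file name, the form of BUCKET_URL, and the upload step are illustrative, not the actual script.

# create-virtualzarr.py (hypothetical sketch, adapted from the code removed in this diff)
import os

import fsspec
import lithops
import xarray as xr
from virtualizarr import open_virtual_dataset

bucket_url = os.getenv("BUCKET_URL")  # assumed to be an s3 URL, e.g. "s3://my-bucket/prefix"

fs_read = fsspec.filesystem("s3", anon=True, skip_instance_cache=True)
files_paths = fs_read.glob("s3://wrf-se-ak-ar5/ccsm/rcp85/daily/2060/*")
file_pattern = sorted(["s3://" + f for f in files_paths])


def map_references(fil):
    """Open one netcdf file as a virtual dataset."""
    return open_virtual_dataset(
        fil,
        indexes={},
        loadable_variables=['Time'],
        cftime_variables=['Time'],
    )


def reduce_references(results):
    """Concatenate the virtual datasets along Time."""
    return xr.combine_nested(
        results,
        concat_dim=["Time"],
        coords="minimal",
        compat="override",
    )


fexec = lithops.FunctionExecutor(config_file="lithops.yaml")
futures = fexec.map_reduce(
    map_references,
    file_pattern,
    reduce_references,
    spawn_reducer=100,
)

# Write the combined references locally, then upload so the rechunk script
# (and its workers) can read them at f"{bucket_url}/combined.json".
combined_vds = futures.get_result()
combined_vds.virtualize.to_kerchunk("combined.json", format="json")
fsspec.filesystem("s3").put("combined.json", f"{bucket_url}/combined.json")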
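Once the rechunk script has run, a quick sanity check of the output store might look like this (a sketch; assumes the bucket is readable with your default credentials and that s3fs is installed).

import os

import xarray as xr

bucket_url = os.getenv("BUCKET_URL")

# Open the consolidated store and confirm the on-disk chunking matches the request.
ds = xr.open_zarr(f"{bucket_url}/rechunked.zarr", consolidated=True)
print(ds.chunks)  # expect chunks of Time: 5, south_north: 25, west_east: 32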