# %% Import packages

import itertools
import math
from pathlib import Path
from bfio import BioReader
import numpy as np
from cloudvolume import CloudVolume
from cloudvolume.lib import touch, Vec
import json
from neuroglancer.downsample import downsample_with_averaging
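# downsample_with_averaging (from the neuroglancer Python package) is used below
# to build the lower-resolution mip levels as each chunk is converted.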

# %% Define the path to the files

# ... (this cell defines the input file path, the BioReader "br", the voxel
# sizes size_x/size_y/size_z, OUTPUT_PATH, and the OVERWRITE flag) ...

num_channels = br.shape[-1]
data_type = "uint16"
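# On-disk chunk shape (XYZ) of the precomputed volume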
chunk_size = [256, 256, 128]
volume_size = [br.shape[1], br.shape[0], br.shape[2]]  # XYZ

# %% Setup the cloudvolume info
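# max_mip=2 with factor (2, 2, 2) registers three scales (mips 0-2), each
# downsampled 2x in X, Y, and Z relative to the previous one.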
info = CloudVolume.create_new_info(
    num_channels=num_channels,
    layer_type="image",
    data_type=data_type,
    encoding="raw",
    resolution=[size_x, size_y, size_z],
    voxel_offset=[0, 0, 0],
    chunk_size=chunk_size,
    volume_size=volume_size,
    max_mip=2,
    factor=Vec(2, 2, 2),
)
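# One CloudVolume handle per mip level, all writing to the same local dataset.
# Passing info= lets the constructors run before the info file exists on disk;
# commit_info() then writes it out once.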
vols = [
    CloudVolume("file://" + str(OUTPUT_PATH), info=info, mip=i) for i in range(3)
]
vols[0].provenance.description = "Example data conversion"
vols[0].commit_info()
vols[0].commit_provenance()

# %% Setup somewhere to hold progress
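# One empty ".done" marker file is written per finished chunk so that an
# interrupted conversion can resume where it left off.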
progress_dir = OUTPUT_PATH / "progress"
progress_dir.mkdir(exist_ok=True)


# %% Functions for moving data
shape = np.array([br.shape[1], br.shape[0], br.shape[2]])
chunk_shape = np.array([1024, 1024, 512])  # this is for reading data
num_chunks_per_dim = np.ceil(shape / chunk_shape).astype(int)
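# Read chunks are 4x the storage chunks along each axis, so every write below
# lands on chunk boundaries (CloudVolume expects aligned writes by default).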


def chunked_reader(x_i, y_i, z_i):
    x_start, x_end = x_i * chunk_shape[0], min((x_i + 1) * chunk_shape[0], shape[0])
    y_start, y_end = y_i * chunk_shape[1], min((y_i + 1) * chunk_shape[1], shape[1])
    z_start, z_end = z_i * chunk_shape[2], min((z_i + 1) * chunk_shape[2], shape[2])

    # Read the chunk from the BioReader
    chunk = br.read(X=(x_start, x_end), Y=(y_start, y_end), Z=(z_start, z_end))

    # bfio reads in (Y, X, Z) order; swap axes to the (X, Y, Z) CloudVolume expects
    return chunk.swapaxes(0, 1)


def process(args):
    x_i, y_i, z_i = args
    start = [x_i * chunk_shape[0], y_i * chunk_shape[1], z_i * chunk_shape[2]]
    end = [
        min((x_i + 1) * chunk_shape[0], shape[0]),
        min((y_i + 1) * chunk_shape[1], shape[1]),
        min((z_i + 1) * chunk_shape[2], shape[2]),
    ]
    f_name = (
        progress_dir
        / f"{start[0]}-{end[0]}_{start[1]}-{end[1]}_{start[2]}-{end[2]}.done"
    )
    if f_name.exists() and not OVERWRITE:
        return
    print("Working on", f_name)
    rawdata = chunked_reader(x_i, y_i, z_i)
    # Write the chunk into every mip level, coarsest first
    for mip_level in reversed(range(3)):
        if mip_level == 0:
            downsampled = rawdata
            ds_start = start
            ds_end = end
        else:
            # Each mip halves the resolution, so mip i is downsampled by 2**i.
            # Downsample XYZ only; any trailing (channel) axis is left untouched.
            factor = 2**mip_level
            downsampled = downsample_with_averaging(
                rawdata, [factor] * 3 + [1] * (rawdata.ndim - 3)
            )
            ds_start = [int(math.ceil(s / factor)) for s in start]
            ds_end = [int(math.ceil(e / factor)) for e in end]

        vols[mip_level][
            ds_start[0] : ds_end[0], ds_start[1] : ds_end[1], ds_start[2] : ds_end[2]
        ] = downsampled
    touch(f_name)


# %% Try with a single chunk to see if it works
# x_i, y_i, z_i = 0, 0, 0
# process((x_i, y_i, z_i))

# %% Loop over all the chunks

coords = itertools.product(
    range(num_chunks_per_dim[0]),
    range(num_chunks_per_dim[1]),
    range(num_chunks_per_dim[2]),
)
# Process in reverse order because the last chunks (the partial ones at the
# volume edge) are the most likely to error
reversed_coords = list(coords)
reversed_coords.reverse()

for coord in reversed_coords:
    process(coord)

# %% Serve the dataset to be used in neuroglancer
vols[0].viewer(port=1337)
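# Point a neuroglancer layer at "precomputed://http://localhost:1337" to view it.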

# %%