Skip to content

Commit fe7de0d

Browse files
committed
remove voxel_size computation for block done array.
Now we simply set the voxel size equal to the block size, resulting in a single number per block. This makes aligning arrays in `neuroglancer` harder: using the voxel_offset is no longer enough, and we will instead have to manually create affine transforms to align the datasets if desired.
1 parent cfee0ef commit fe7de0d

File tree

1 file changed

+1
-22
lines changed

1 file changed

+1
-22
lines changed

volara/blockwise/blockwise.py

Lines changed: 1 addition & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -301,25 +301,6 @@ def init_block_array(self):
301301
"""
302302
# prepare blocks done ds
303303

304-
def cmin(a, b):
305-
return Coordinate([min(ai, bi) for ai, bi in zip(a, b)])
306-
307-
def cmax(a, b):
308-
return Coordinate([max(ai, bi) for ai, bi in zip(a, b)])
309-
310-
def gcd(a, b):
311-
while b:
312-
a, b = b, a % b
313-
return a
314-
315-
def cgcd(a, *bs):
316-
while len(bs) > 0:
317-
b = bs[0]
318-
bs = bs[1:]
319-
a, b = cmax(a, b), cmin(a, b)
320-
a = Coordinate([gcd(ai, bi) for ai, bi in zip(a, b)])
321-
return abs(a)
322-
323304
def get_dtype(write_roi, write_size):
324305
# need to factor in block offset, so use cantor number of last block
325306
# + 1 to be safe
@@ -332,9 +313,7 @@ def get_dtype(write_roi, write_size):
332313
f"Number of blocks ({num_blocks}) is too large for available data types."
333314
)
334315

335-
block_voxel_size = cgcd(
336-
self.write_roi.offset, self.write_size, self.write_roi.shape
337-
)
316+
block_voxel_size = self.write_size
338317

339318
try:
340319
prepare_ds(

0 commit comments

Comments (0)