@@ -296,17 +296,6 @@ def _merge_chunk_array(
     is_complete_chunk: bool,
     drop_axes: tuple[int, ...],
 ) -> NDBuffer:
-    if is_complete_chunk and value.shape == chunk_spec.shape:
-        return value
-    if existing_chunk_array is None:
-        chunk_array = chunk_spec.prototype.nd_buffer.create(
-            shape=chunk_spec.shape,
-            dtype=chunk_spec.dtype,
-            order=chunk_spec.order,
-            fill_value=fill_value_or_default(chunk_spec),
-        )
-    else:
-        chunk_array = existing_chunk_array.copy()  # make a writable copy
     if chunk_selection == () or is_scalar(value.as_ndarray_like(), chunk_spec.dtype):
         chunk_value = value
     else:
@@ -320,6 +309,20 @@ def _merge_chunk_array(
                 for idx in range(chunk_spec.ndim)
             )
             chunk_value = chunk_value[item]
+    if is_complete_chunk and chunk_value.shape == chunk_spec.shape:
+        # TODO: For the last chunk, is_complete_chunk can be True while
+        # chunk_value is smaller than chunk_spec.shape; returning it here
+        # raises an error in _decode_single.
+        return chunk_value
+    if existing_chunk_array is None:
+        chunk_array = chunk_spec.prototype.nd_buffer.create(
+            shape=chunk_spec.shape,
+            dtype=chunk_spec.dtype,
+            order=chunk_spec.order,
+            fill_value=fill_value_or_default(chunk_spec),
+        )
+    else:
+        chunk_array = existing_chunk_array.copy()  # make a writable copy
     chunk_array[chunk_selection] = chunk_value
     return chunk_array

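For reference, here is a minimal sketch of the merge logic after this change, using plain NumPy arrays instead of zarr's NDBuffer/ArraySpec. The function name merge_chunk, its keyword arguments, and the shapes below are illustrative assumptions, not zarr's actual API. It shows why the complete-chunk short-circuit is now checked against the sliced chunk_value rather than the raw value: only data that already fills the chunk shape can skip the copy into a chunk-sized buffer.

import numpy as np


def merge_chunk(value, out_selection, chunk_selection, chunk_shape,
                existing=None, fill_value=0, is_complete_chunk=False):
    # Slice the incoming value down to the portion destined for this chunk first,
    # so the fast path below only fires when the *sliced* data fills the chunk.
    chunk_value = value[out_selection]
    if is_complete_chunk and chunk_value.shape == chunk_shape:
        return chunk_value
    # Otherwise merge into a writable copy of the existing chunk, or into a
    # freshly allocated fill-value chunk.
    chunk = np.full(chunk_shape, fill_value) if existing is None else existing.copy()
    chunk[chunk_selection] = chunk_value
    return chunk


# Writing a 2x4 block into the top half of a 4x4 chunk: not a complete chunk,
# so the block is merged into a fill-value array of the full chunk shape.
out = merge_chunk(
    value=np.arange(8).reshape(2, 4),
    out_selection=(slice(None), slice(None)),
    chunk_selection=(slice(0, 2), slice(None)),
    chunk_shape=(4, 4),
)
assert out.shape == (4, 4)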