@@ -209,39 +209,65 @@ int block_writer_finish(struct block_writer *w)
 	return w->next;
 }
 
-int block_reader_init(struct block_reader *br, struct reftable_block *block,
-		      uint32_t header_off, uint32_t table_block_size,
-		      uint32_t hash_size)
+static int read_block(struct reftable_block_source *source,
+		      struct reftable_block *dest, uint64_t off,
+		      uint32_t sz)
 {
+	size_t size = block_source_size(source);
+	block_source_return_block(dest);
+	if (off >= size)
+		return 0;
+	if (off + sz > size)
+		sz = size - off;
+	return block_source_read_block(source, dest, off, sz);
+}
+
+int block_reader_init(struct block_reader *br,
+		      struct reftable_block_source *source,
+		      uint32_t offset, uint32_t header_size,
+		      uint32_t table_block_size, uint32_t hash_size)
+{
+	uint32_t guess_block_size = table_block_size ?
+		table_block_size : DEFAULT_BLOCK_SIZE;
 	uint32_t full_block_size = table_block_size;
-	uint8_t typ = block->data[header_off];
-	uint32_t sz = reftable_get_be24(block->data + header_off + 1);
 	uint16_t restart_count;
 	uint32_t restart_off;
+	uint32_t block_size;
+	uint8_t block_type;
 	int err;
 
-	block_source_return_block(&br->block);
+	err = read_block(source, &br->block, offset, guess_block_size);
+	if (err < 0)
+		goto done;
 
-	if (!reftable_is_block_type(typ)) {
-		err = REFTABLE_FORMAT_ERROR;
+	block_type = br->block.data[header_size];
+	if (!reftable_is_block_type(block_type)) {
+		err = REFTABLE_FORMAT_ERROR;
 		goto done;
 	}
 
-	if (typ == BLOCK_TYPE_LOG) {
-		uint32_t block_header_skip = 4 + header_off;
-		uLong dst_len = sz - block_header_skip;
-		uLong src_len = block->len - block_header_skip;
+	block_size = reftable_get_be24(br->block.data + header_size + 1);
+	if (block_size > guess_block_size) {
+		err = read_block(source, &br->block, offset, block_size);
+		if (err < 0)
+			goto done;
+	}
+
+	if (block_type == BLOCK_TYPE_LOG) {
+		uint32_t block_header_skip = 4 + header_size;
+		uLong dst_len = block_size - block_header_skip;
+		uLong src_len = br->block.len - block_header_skip;
 
 		/* Log blocks specify the *uncompressed* size in their header. */
-		REFTABLE_ALLOC_GROW_OR_NULL(br->uncompressed_data, sz,
+		REFTABLE_ALLOC_GROW_OR_NULL(br->uncompressed_data, block_size,
 					    br->uncompressed_cap);
 		if (!br->uncompressed_data) {
 			err = REFTABLE_OUT_OF_MEMORY_ERROR;
 			goto done;
 		}
 
 		/* Copy over the block header verbatim. It's not compressed. */
-		memcpy(br->uncompressed_data, block->data, block_header_skip);
+		memcpy(br->uncompressed_data, br->block.data, block_header_skip);
 
 		if (!br->zstream) {
 			REFTABLE_CALLOC_ARRAY(br->zstream, 1);
@@ -259,7 +285,7 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
 			goto done;
 		}
 
-		br->zstream->next_in = block->data + block_header_skip;
+		br->zstream->next_in = br->block.data + block_header_skip;
 		br->zstream->avail_in = src_len;
 		br->zstream->next_out = br->uncompressed_data + block_header_skip;
 		br->zstream->avail_out = dst_len;
@@ -278,43 +304,41 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
 		}
 		err = 0;
 
-		if (br->zstream->total_out + block_header_skip != sz) {
+		if (br->zstream->total_out + block_header_skip != block_size) {
 			err = REFTABLE_FORMAT_ERROR;
 			goto done;
 		}
 
 		/* We're done with the input data. */
-		block_source_return_block(block);
-		block->data = br->uncompressed_data;
-		block->len = sz;
+		block_source_return_block(&br->block);
+		br->block.data = br->uncompressed_data;
+		br->block.len = block_size;
 		full_block_size = src_len + block_header_skip - br->zstream->avail_in;
 	} else if (full_block_size == 0) {
-		full_block_size = sz;
-	} else if (sz < full_block_size && sz < block->len &&
-		   block->data[sz] != 0) {
+		full_block_size = block_size;
+	} else if (block_size < full_block_size && block_size < br->block.len &&
+		   br->block.data[block_size] != 0) {
 		/* If the block is smaller than the full block size, it is
 		   padded (data followed by '\0') or the next block is
 		   unaligned. */
-		full_block_size = sz;
+		full_block_size = block_size;
 	}
 
-	restart_count = reftable_get_be16(block->data + sz - 2);
-	restart_off = sz - 2 - 3 * restart_count;
-
-	/* transfer ownership. */
-	br->block = *block;
-	block->data = NULL;
-	block->len = 0;
+	restart_count = reftable_get_be16(br->block.data + block_size - 2);
+	restart_off = block_size - 2 - 3 * restart_count;
 
+	br->block_type = block_type;
 	br->hash_size = hash_size;
 	br->restart_off = restart_off;
 	br->full_block_size = full_block_size;
-	br->header_off = header_off;
+	br->header_off = header_size;
 	br->restart_count = restart_count;
 
 	err = 0;
 
 done:
+	if (err < 0)
+		block_reader_release(br);
 	return err;
 }
 
@@ -324,6 +348,7 @@ void block_reader_release(struct block_reader *br)
 	reftable_free(br->zstream);
 	reftable_free(br->uncompressed_data);
 	block_source_return_block(&br->block);
+	memset(br, 0, sizeof(*br));
 }
 
 uint8_t block_reader_type(const struct block_reader *r)
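With this change, block_reader_init() reads its block directly from a reftable_block_source: it first reads a guessed size (table_block_size, or DEFAULT_BLOCK_SIZE when that is zero) and re-reads if the size encoded in the block header turns out to be larger. A minimal caller-side sketch of the new signature follows; it assumes the blocksource helpers reftable_block_source_from_file() and block_source_close() are available with their usual signatures, and the path, the offset 0, the 24-byte header size, and the 20-byte (SHA-1) hash size are illustrative values, not taken from this commit.

#include <stdio.h>

#include "block.h"
#include "blocksource.h"

/*
 * Hypothetical caller: open a reftable file as a block source and let
 * block_reader_init() read and decode the first block itself.
 */
static int print_first_block_type(const char *table_path)
{
	struct reftable_block_source src = { 0 };
	struct block_reader br = { 0 };
	int err;

	/* Assumed helper that exposes the file as a block source. */
	err = reftable_block_source_from_file(&src, table_path);
	if (err < 0)
		return err;

	/*
	 * Offset 0 with a 24-byte file header and a 20-byte (SHA-1) hash;
	 * a table_block_size of 0 makes the initial read fall back to
	 * DEFAULT_BLOCK_SIZE. All of these values are for illustration only.
	 */
	err = block_reader_init(&br, &src, 0, 24, 0, 20);
	if (err < 0)
		goto out;

	printf("first block type: %c\n", block_reader_type(&br));
	block_reader_release(&br);

out:
	block_source_close(&src);
	return err;
}

Note that the done-label cleanup added in this commit means a failed block_reader_init() already releases the reader, so the sketch only releases it on the success path.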