@@ -659,58 +659,74 @@ static void writer_clear_index(struct reftable_writer *w)
 	w->index_cap = 0;
 }
 
-static const int debug = 0;
-
 static int writer_flush_nonempty_block(struct reftable_writer *w)
 {
+	struct reftable_index_record index_record = {
+		.last_key = STRBUF_INIT,
+	};
 	uint8_t typ = block_writer_type(w->block_writer);
-	struct reftable_block_stats *bstats =
-		writer_reftable_block_stats(w, typ);
-	uint64_t block_typ_off = (bstats->blocks == 0) ? w->next : 0;
-	int raw_bytes = block_writer_finish(w->block_writer);
-	int padding = 0;
-	int err = 0;
-	struct reftable_index_record ir = { .last_key = STRBUF_INIT };
+	struct reftable_block_stats *bstats;
+	int raw_bytes, padding = 0, err;
+	uint64_t block_typ_off;
+
+	/*
+	 * Finish the current block. This will cause the block writer to emit
+	 * restart points and potentially compress records in case we are
+	 * writing a log block.
+	 *
+	 * Note that this is still happening in memory.
+	 */
+	raw_bytes = block_writer_finish(w->block_writer);
 	if (raw_bytes < 0)
 		return raw_bytes;
 
-	if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG) {
+	/*
+	 * By default, all records except for log records are padded to the
+	 * block size.
+	 */
+	if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG)
 		padding = w->opts.block_size - raw_bytes;
-	}
 
-	if (block_typ_off > 0) {
+	bstats = writer_reftable_block_stats(w, typ);
+	block_typ_off = (bstats->blocks == 0) ? w->next : 0;
+	if (block_typ_off > 0)
 		bstats->offset = block_typ_off;
-	}
-
 	bstats->entries += w->block_writer->entries;
 	bstats->restarts += w->block_writer->restart_len;
 	bstats->blocks++;
 	w->stats.blocks++;
 
-	if (debug) {
-		fprintf(stderr, "block %c off %" PRIu64 " sz %d (%d)\n", typ,
-			w->next, raw_bytes,
-			get_be24(w->block + w->block_writer->header_off + 1));
-	}
-
-	if (w->next == 0) {
+	/*
+	 * If this is the first block we're writing to the table then we need
+	 * to also write the reftable header.
+	 */
+	if (!w->next)
 		writer_write_header(w, w->block);
-	}
 
 	err = padded_write(w, w->block, raw_bytes, padding);
 	if (err < 0)
 		return err;
 
+	/*
+	 * Add an index record for every block that we're writing. If we end up
+	 * having more than a threshold of index records we will end up writing
+	 * an index section in `writer_finish_section()`. Each index record
+	 * contains the last record key of the block it is indexing as well as
+	 * the offset of that block.
+	 *
+	 * Note that this also applies when flushing index blocks, in which
+	 * case we will end up with a multi-level index.
+	 */
 	REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
-
-	ir.offset = w->next;
-	strbuf_reset(&ir.last_key);
-	strbuf_addbuf(&ir.last_key, &w->block_writer->last_key);
-	w->index[w->index_len] = ir;
-
+	index_record.offset = w->next;
+	strbuf_reset(&index_record.last_key);
+	strbuf_addbuf(&index_record.last_key, &w->block_writer->last_key);
+	w->index[w->index_len] = index_record;
 	w->index_len++;
+
 	w->next += padding + raw_bytes;
 	w->block_writer = NULL;
+
 	return 0;
 }
 