@@ -577,6 +577,78 @@ static void fill_pack_entry(uint32_t pack_int_id,
577
577
entry -> preferred = !!preferred ;
578
578
}
579
579
580
/*
 * A growable batch of pack_midx_entry records, used to collect all
 * objects belonging to a single fanout bucket before sorting and
 * de-duplicating them.
 */
struct midx_fanout {
	struct pack_midx_entry *entries;
	uint32_t nr;    /* number of entries currently in use */
	uint32_t alloc; /* allocated capacity of 'entries' */
};
585
+
586
/*
 * Ensure 'fanout->entries' has room for at least 'nr' entries,
 * growing the allocation (and 'fanout->alloc') as needed.
 */
static void midx_fanout_grow(struct midx_fanout *fanout, uint32_t nr)
{
	ALLOC_GROW(fanout->entries, nr, fanout->alloc);
}
590
+
591
/*
 * Sort the accumulated entries with midx_oid_compare (by OID, then by
 * mtime descending) so that duplicate objects become adjacent and the
 * caller can keep only the first copy of each.
 */
static void midx_fanout_sort(struct midx_fanout *fanout)
{
	QSORT(fanout->entries, fanout->nr, midx_oid_compare);
}
595
+
596
+ static void midx_fanout_add_midx_fanout (struct midx_fanout * fanout ,
597
+ struct multi_pack_index * m ,
598
+ uint32_t cur_fanout ,
599
+ int preferred_pack )
600
+ {
601
+ uint32_t start = 0 , end ;
602
+ uint32_t cur_object ;
603
+
604
+ if (cur_fanout )
605
+ start = ntohl (m -> chunk_oid_fanout [cur_fanout - 1 ]);
606
+ end = ntohl (m -> chunk_oid_fanout [cur_fanout ]);
607
+
608
+ for (cur_object = start ; cur_object < end ; cur_object ++ ) {
609
+ if ((preferred_pack > -1 ) &&
610
+ (preferred_pack == nth_midxed_pack_int_id (m , cur_object ))) {
611
+ /*
612
+ * Objects from preferred packs are added
613
+ * separately.
614
+ */
615
+ continue ;
616
+ }
617
+
618
+ midx_fanout_grow (fanout , fanout -> nr + 1 );
619
+ nth_midxed_pack_midx_entry (m ,
620
+ & fanout -> entries [fanout -> nr ],
621
+ cur_object );
622
+ fanout -> entries [fanout -> nr ].preferred = 0 ;
623
+ fanout -> nr ++ ;
624
+ }
625
+ }
626
+
627
+ static void midx_fanout_add_pack_fanout (struct midx_fanout * fanout ,
628
+ struct pack_info * info ,
629
+ uint32_t cur_pack ,
630
+ int preferred ,
631
+ uint32_t cur_fanout )
632
+ {
633
+ struct packed_git * pack = info [cur_pack ].p ;
634
+ uint32_t start = 0 , end ;
635
+ uint32_t cur_object ;
636
+
637
+ if (cur_fanout )
638
+ start = get_pack_fanout (pack , cur_fanout - 1 );
639
+ end = get_pack_fanout (pack , cur_fanout );
640
+
641
+ for (cur_object = start ; cur_object < end ; cur_object ++ ) {
642
+ midx_fanout_grow (fanout , fanout -> nr + 1 );
643
+ fill_pack_entry (cur_pack ,
644
+ info [cur_pack ].p ,
645
+ cur_object ,
646
+ & fanout -> entries [fanout -> nr ],
647
+ preferred );
648
+ fanout -> nr ++ ;
649
+ }
650
+ }
651
+
580
652
/*
581
653
* It is possible to artificially get into a state where there are many
582
654
* duplicate copies of objects. That can create high memory pressure if
@@ -595,8 +667,8 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
595
667
int preferred_pack )
596
668
{
597
669
uint32_t cur_fanout , cur_pack , cur_object ;
598
- uint32_t alloc_fanout , alloc_objects , total_objects = 0 ;
599
- struct pack_midx_entry * entries_by_fanout = NULL ;
670
+ uint32_t alloc_objects , total_objects = 0 ;
671
+ struct midx_fanout fanout = { 0 } ;
600
672
struct pack_midx_entry * deduplicated_entries = NULL ;
601
673
uint32_t start_pack = m ? m -> num_packs : 0 ;
602
674
@@ -608,74 +680,51 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
608
680
* slices to be evenly distributed, with some noise. Hence,
609
681
* allocate slightly more than one 256th.
610
682
*/
611
- alloc_objects = alloc_fanout = total_objects > 3200 ? total_objects / 200 : 16 ;
683
+ alloc_objects = fanout . alloc = total_objects > 3200 ? total_objects / 200 : 16 ;
612
684
613
- ALLOC_ARRAY (entries_by_fanout , alloc_fanout );
685
+ ALLOC_ARRAY (fanout . entries , fanout . alloc );
614
686
ALLOC_ARRAY (deduplicated_entries , alloc_objects );
615
687
* nr_objects = 0 ;
616
688
617
689
for (cur_fanout = 0 ; cur_fanout < 256 ; cur_fanout ++ ) {
618
- uint32_t nr_fanout = 0 ;
619
-
620
- if (m ) {
621
- uint32_t start = 0 , end ;
622
-
623
- if (cur_fanout )
624
- start = ntohl (m -> chunk_oid_fanout [cur_fanout - 1 ]);
625
- end = ntohl (m -> chunk_oid_fanout [cur_fanout ]);
626
-
627
- for (cur_object = start ; cur_object < end ; cur_object ++ ) {
628
- ALLOC_GROW (entries_by_fanout , nr_fanout + 1 , alloc_fanout );
629
- nth_midxed_pack_midx_entry (m ,
630
- & entries_by_fanout [nr_fanout ],
631
- cur_object );
632
- if (nth_midxed_pack_int_id (m , cur_object ) == preferred_pack )
633
- entries_by_fanout [nr_fanout ].preferred = 1 ;
634
- else
635
- entries_by_fanout [nr_fanout ].preferred = 0 ;
636
- nr_fanout ++ ;
637
- }
638
- }
690
+ fanout .nr = 0 ;
691
+
692
+ if (m )
693
+ midx_fanout_add_midx_fanout (& fanout , m , cur_fanout ,
694
+ preferred_pack );
639
695
640
696
for (cur_pack = start_pack ; cur_pack < nr_packs ; cur_pack ++ ) {
641
- uint32_t start = 0 , end ;
642
697
int preferred = cur_pack == preferred_pack ;
643
-
644
- if (cur_fanout )
645
- start = get_pack_fanout (info [cur_pack ].p , cur_fanout - 1 );
646
- end = get_pack_fanout (info [cur_pack ].p , cur_fanout );
647
-
648
- for (cur_object = start ; cur_object < end ; cur_object ++ ) {
649
- ALLOC_GROW (entries_by_fanout , nr_fanout + 1 , alloc_fanout );
650
- fill_pack_entry (cur_pack ,
651
- info [cur_pack ].p ,
652
- cur_object ,
653
- & entries_by_fanout [nr_fanout ],
654
- preferred );
655
- nr_fanout ++ ;
656
- }
698
+ midx_fanout_add_pack_fanout (& fanout ,
699
+ info , cur_pack ,
700
+ preferred , cur_fanout );
657
701
}
658
702
659
- QSORT (entries_by_fanout , nr_fanout , midx_oid_compare );
703
+ if (-1 < preferred_pack && preferred_pack < start_pack )
704
+ midx_fanout_add_pack_fanout (& fanout , info ,
705
+ preferred_pack , 1 ,
706
+ cur_fanout );
707
+
708
+ midx_fanout_sort (& fanout );
660
709
661
710
/*
662
711
* The batch is now sorted by OID and then mtime (descending).
663
712
* Take only the first duplicate.
664
713
*/
665
- for (cur_object = 0 ; cur_object < nr_fanout ; cur_object ++ ) {
666
- if (cur_object && oideq (& entries_by_fanout [cur_object - 1 ].oid ,
667
- & entries_by_fanout [cur_object ].oid ))
714
+ for (cur_object = 0 ; cur_object < fanout . nr ; cur_object ++ ) {
715
+ if (cur_object && oideq (& fanout . entries [cur_object - 1 ].oid ,
716
+ & fanout . entries [cur_object ].oid ))
668
717
continue ;
669
718
670
719
ALLOC_GROW (deduplicated_entries , * nr_objects + 1 , alloc_objects );
671
720
memcpy (& deduplicated_entries [* nr_objects ],
672
- & entries_by_fanout [cur_object ],
721
+ & fanout . entries [cur_object ],
673
722
sizeof (struct pack_midx_entry ));
674
723
(* nr_objects )++ ;
675
724
}
676
725
}
677
726
678
- free (entries_by_fanout );
727
+ free (fanout . entries );
679
728
return deduplicated_entries ;
680
729
}
681
730
0 commit comments