@@ -475,11 +475,10 @@ static void paint_down(struct paint_info *info, const unsigned char *sha1,
 	free(tmp);
 }
 
-static int mark_uninteresting(const char *refname,
-			      const unsigned char *sha1,
+static int mark_uninteresting(const char *refname, const struct object_id *oid,
 			      int flags, void *cb_data)
 {
-	struct commit *commit = lookup_commit_reference_gently(sha1, 1);
+	struct commit *commit = lookup_commit_reference_gently(oid->hash, 1);
 	if (!commit)
 		return 0;
 	commit->object.flags |= UNINTERESTING;
@@ -512,8 +511,6 @@ void assign_shallow_commits_to_refs(struct shallow_info *info,
 	unsigned int i, nr;
 	int *shallow, nr_shallow = 0;
 	struct paint_info pi;
-	struct each_ref_fn_sha1_adapter wrapped_mark_uninteresting =
-		{mark_uninteresting, NULL};
 
 	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
 	shallow = xmalloc(sizeof(*shallow) * (info->nr_ours + info->nr_theirs));
@@ -544,8 +541,8 @@ void assign_shallow_commits_to_refs(struct shallow_info *info,
 	 * connect to old refs. If not (e.g. force ref updates) it'll
 	 * have to go down to the current shallow commits.
 	 */
-	head_ref(each_ref_fn_adapter, &wrapped_mark_uninteresting);
-	for_each_ref(each_ref_fn_adapter, &wrapped_mark_uninteresting);
+	head_ref(mark_uninteresting, NULL);
+	for_each_ref(mark_uninteresting, NULL);
 
 	/* Mark potential bottoms so we won't go out of bound */
 	for (i = 0; i < nr_shallow; i++) {
@@ -586,12 +583,12 @@ struct commit_array {
 	int nr, alloc;
 };
 
-static int add_ref(const char *refname,
-		   const unsigned char *sha1, int flags, void *cb_data)
+static int add_ref(const char *refname, const struct object_id *oid,
+		   int flags, void *cb_data)
 {
 	struct commit_array *ca = cb_data;
 	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
-	ca->commits[ca->nr] = lookup_commit_reference_gently(sha1, 1);
+	ca->commits[ca->nr] = lookup_commit_reference_gently(oid->hash, 1);
 	if (ca->commits[ca->nr])
 		ca->nr++;
 	return 0;
@@ -620,8 +617,6 @@ static void post_assign_shallow(struct shallow_info *info,
 	int dst, i, j;
 	int bitmap_nr = (info->ref->nr + 31) / 32;
 	struct commit_array ca;
-	struct each_ref_fn_sha1_adapter wrapped_add_ref =
-		{add_ref, &ca};
 
 	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
 	if (ref_status)
@@ -645,8 +640,8 @@ static void post_assign_shallow(struct shallow_info *info,
 	info->nr_theirs = dst;
 
 	memset(&ca, 0, sizeof(ca));
-	head_ref(each_ref_fn_adapter, &wrapped_add_ref);
-	for_each_ref(each_ref_fn_adapter, &wrapped_add_ref);
+	head_ref(add_ref, &ca);
+	for_each_ref(add_ref, &ca);
 
 	/* Remove unreachable shallow commits from "ours" */
 	for (i = dst = 0; i < info->nr_ours; i++) {
@@ -678,12 +673,10 @@ int delayed_reachability_test(struct shallow_info *si, int c)
 
 	if (!si->commits) {
 		struct commit_array ca;
-		struct each_ref_fn_sha1_adapter wrapped_add_ref =
-			{add_ref, &ca};
 
 		memset(&ca, 0, sizeof(ca));
-		head_ref(each_ref_fn_adapter, &wrapped_add_ref);
-		for_each_ref(each_ref_fn_adapter, &wrapped_add_ref);
+		head_ref(add_ref, &ca);
+		for_each_ref(add_ref, &ca);
 		si->commits = ca.commits;
 		si->nr_commits = ca.nr;
 	}
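
For context, a minimal sketch of what a caller looks like under the new each_ref_fn signature this diff converts to; count_refs and the surrounding usage snippet are hypothetical names for illustration, not part of this change:

/*
 * Sketch of a callback using the post-conversion each_ref_fn
 * signature: the ref's value arrives as a struct object_id rather
 * than a bare "const unsigned char *sha1".  With the object_id
 * signature, the each_ref_fn_sha1_adapter wrapper removed above is
 * unnecessary; callbacks are registered directly and caller state
 * travels through cb_data as before.
 */
static int count_refs(const char *refname, const struct object_id *oid,
		      int flags, void *cb_data)
{
	int *count = cb_data;	/* cb_data carries caller state directly */
	(*count)++;
	return 0;		/* non-zero would abort the iteration */
}

	/* hypothetical usage at a call site: */
	int count = 0;
	for_each_ref(count_refs, &count);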