@@ -423,7 +423,7 @@ static int get_common_prefix_len(const char *common_prefix)
 
 static int read_one_entry_opt(struct index_state *istate,
			      const struct object_id *oid,
-			      const char *base, int baselen,
+			      struct strbuf *base,
			      const char *pathname,
			      unsigned mode, int opt)
 {
@@ -434,13 +434,13 @@ static int read_one_entry_opt(struct index_state *istate,
 		return READ_TREE_RECURSIVE;
 
 	len = strlen(pathname);
-	ce = make_empty_cache_entry(istate, baselen + len);
+	ce = make_empty_cache_entry(istate, base->len + len);
 
 	ce->ce_mode = create_ce_mode(mode);
 	ce->ce_flags = create_ce_flags(1);
-	ce->ce_namelen = baselen + len;
-	memcpy(ce->name, base, baselen);
-	memcpy(ce->name + baselen, pathname, len + 1);
+	ce->ce_namelen = base->len + len;
+	memcpy(ce->name, base->buf, base->len);
+	memcpy(ce->name + base->len, pathname, len + 1);
 	oidcpy(&ce->oid, oid);
 	return add_index_entry(istate, ce, opt);
 }
@@ -450,7 +450,7 @@ static int read_one_entry(const struct object_id *oid, struct strbuf *base,
			  void *context)
 {
 	struct index_state *istate = context;
-	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
+	return read_one_entry_opt(istate, oid, base, pathname,
				  mode,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
 }
@@ -464,42 +464,8 @@ static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base
				void *context)
 {
 	struct index_state *istate = context;
-	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
-				  mode,
-				  ADD_CACHE_JUST_APPEND);
-}
-
-
-static int read_tree(struct repository *r, struct tree *tree,
-		     struct pathspec *match, struct index_state *istate)
-{
-	read_tree_fn_t fn = NULL;
-	int i, err;
-
-
-	/*
-	 * See if we have cache entry at the stage.  If so,
-	 * do it the original slow way, otherwise, append and then
-	 * sort at the end.
-	 */
-	for (i = 0; !fn && i < istate->cache_nr; i++) {
-		const struct cache_entry *ce = istate->cache[i];
-		if (ce_stage(ce) == 1)
-			fn = read_one_entry;
-	}
-
-	if (!fn)
-		fn = read_one_entry_quick;
-	err = read_tree_recursive(r, tree, "", 0, 0, match, fn, istate);
-	if (fn == read_one_entry || err)
-		return err;
-
-	/*
-	 * Sort the cache entry -- we need to nuke the cache tree, though.
-	 */
-	cache_tree_free(&istate->cache_tree);
-	QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
-	return 0;
+	return read_one_entry_opt(istate, oid, base, pathname,
+				  mode, ADD_CACHE_JUST_APPEND);
 }
 
 /*
@@ -518,6 +484,8 @@ void overlay_tree_on_index(struct index_state *istate,
 	struct pathspec pathspec;
 	struct cache_entry *last_stage0 = NULL;
 	int i;
+	read_tree_fn_t fn = NULL;
+	int err;
 
 	if (get_oid(tree_name, &oid))
 		die("tree-ish %s not found.", tree_name);
@@ -540,9 +508,32 @@ void overlay_tree_on_index(struct index_state *istate,
			       PATHSPEC_PREFER_CWD, prefix, matchbuf);
 	} else
 		memset(&pathspec, 0, sizeof(pathspec));
-	if (read_tree(the_repository, tree, &pathspec, istate))
+
+	/*
+	 * See if we have cache entry at the stage.  If so,
+	 * do it the original slow way, otherwise, append and then
+	 * sort at the end.
+	 */
+	for (i = 0; !fn && i < istate->cache_nr; i++) {
+		const struct cache_entry *ce = istate->cache[i];
+		if (ce_stage(ce) == 1)
+			fn = read_one_entry;
+	}
+
+	if (!fn)
+		fn = read_one_entry_quick;
+	err = read_tree_recursive(the_repository, tree, "", 0, 1, &pathspec, fn, istate);
+	if (err)
 		die("unable to read tree entries %s", tree_name);
 
+	/*
+	 * Sort the cache entry -- we need to nuke the cache tree, though.
+	 */
+	if (fn == read_one_entry_quick) {
+		cache_tree_free(&istate->cache_tree);
+		QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
+	}
+
 	for (i = 0; i < istate->cache_nr; i++) {
 		struct cache_entry *ce = istate->cache[i];
 		switch (ce_stage(ce)) {
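Taken together, the hunks above make two changes: read_one_entry_opt() now receives the tree walk's strbuf base directly instead of a split (base, baselen) pair, and the read_tree() helper is dissolved into its only caller, overlay_tree_on_index(). The caller keeps the same strategy as before: if the index already holds stage-1 entries, use read_one_entry() and its ordered insertion; otherwise append everything with read_one_entry_quick() and sort once at the end, nuking the cache tree first. The standalone sketch below illustrates those two ideas; it is not Git code, and struct buf, struct entry, and make_entry() are invented stand-ins for strbuf, cache_entry, and the name-building in read_one_entry_opt().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-ins for Git's strbuf and cache_entry, for illustration. */
struct buf { const char *buf; size_t len; };
struct entry { char *name; };

static int cmp_entry(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;
	return strcmp(ea->name, eb->name);
}

/*
 * Mirrors the name-building in read_one_entry_opt() after the change:
 * the full path is base->buf (the directories walked so far) followed
 * by pathname, sized with base->len instead of a separate baselen.
 */
static struct entry make_entry(const struct buf *base, const char *pathname)
{
	size_t len = strlen(pathname);
	struct entry e;

	e.name = malloc(base->len + len + 1);
	memcpy(e.name, base->buf, base->len);
	memcpy(e.name + base->len, pathname, len + 1); /* copies the NUL too */
	return e;
}

int main(void)
{
	struct buf base = { "dir/", 4 };
	const char *paths[] = { "c.txt", "a.txt", "b.txt" };
	struct entry list[3];
	size_t nr = 0, i;

	/*
	 * The "quick" strategy from the diff: append entries in whatever
	 * order the tree walk yields them, then sort the array once.
	 */
	for (i = 0; i < 3; i++)
		list[nr++] = make_entry(&base, paths[i]);
	qsort(list, nr, sizeof(*list), cmp_entry);

	for (i = 0; i < nr; i++) {
		printf("%s\n", list[i].name); /* dir/a.txt, dir/b.txt, dir/c.txt */
		free(list[i].name);
	}
	return 0;
}

The append-then-sort path is the performance win: each read_one_entry_quick() call is a constant-time append and the single final sort costs O(n log n), whereas inserting every entry into an already-sorted index can degrade to quadratic time. It only applies when nothing is at the stage being read, which is why the loop over existing entries picks the slow read_one_entry() path as soon as it sees a stage-1 entry, and why, as the diff's own comment notes, the sorted path must free the cache tree before the QSORT.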