@@ -425,7 +425,7 @@ static int read_one_entry_opt(struct index_state *istate,
 			      const struct object_id *oid,
 			      const char *base, int baselen,
 			      const char *pathname,
-			      unsigned mode, int stage, int opt)
+			      unsigned mode, int opt)
 {
 	int len;
 	struct cache_entry *ce;
@@ -437,7 +437,7 @@ static int read_one_entry_opt(struct index_state *istate,
 	ce = make_empty_cache_entry(istate, baselen + len);
 
 	ce->ce_mode = create_ce_mode(mode);
-	ce->ce_flags = create_ce_flags(stage);
+	ce->ce_flags = create_ce_flags(1);
 	ce->ce_namelen = baselen + len;
 	memcpy(ce->name, base, baselen);
 	memcpy(ce->name + baselen, pathname, len + 1);
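Note: the stage a cache entry occupies is packed into ce_flags, which is why hardcoding create_ce_flags(1) pins every entry read from the tree at stage 1. The self-contained sketch below shows the encoding; the macro bodies are reproduced from cache.h as remembered, so treat them as illustrative rather than authoritative.

    #include <stdio.h>

    /* Sketch of the stage-bit macros from git's cache.h (from memory;
     * illustrative, not authoritative): the merge stage occupies two
     * bits of ce_flags. */
    #define CE_STAGEMASK (0x3000)
    #define CE_STAGESHIFT 12
    #define create_ce_flags(stage) ((stage) << CE_STAGESHIFT)

    int main(void)
    {
    	unsigned int flags = create_ce_flags(1);
    	/* Recover the stage the way ce_stage() does, minus the struct access. */
    	unsigned int stage = (flags & CE_STAGEMASK) >> CE_STAGESHIFT;
    	printf("flags = 0x%04x, stage = %u\n", flags, stage); /* 0x1000, 1 */
    	return 0;
    }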
@@ -451,7 +451,7 @@ static int read_one_entry(const struct object_id *oid, struct strbuf *base,
 {
 	struct index_state *istate = context;
 	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
-				  mode, stage,
+				  mode,
 				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
 }
 
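Note: the two callbacks differ only in the opt flags they forward to read_one_entry_opt(). read_one_entry() takes the checked insertion path (ADD_CACHE_OK_TO_ADD, with the directory/file conflict check suppressed via ADD_CACHE_SKIP_DFCHECK), while read_one_entry_quick() below passes ADD_CACHE_JUST_APPEND, which, as far as can be told from add_index_entry(), appends blindly and relies on the caller feeding entries in sorted order.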
@@ -465,26 +465,17 @@ static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base
 {
 	struct index_state *istate = context;
 	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
-				  mode, stage,
+				  mode,
 				  ADD_CACHE_JUST_APPEND);
 }
 
 
-static int read_tree(struct repository *r, struct tree *tree, int stage,
+static int read_tree(struct repository *r, struct tree *tree,
 		     struct pathspec *match, struct index_state *istate)
 {
 	read_tree_fn_t fn = NULL;
 	int i, err;
 
-	/*
-	 * Currently the only existing callers of this function all
-	 * call it with stage=1 and after making sure there is nothing
-	 * at that stage; we could always use read_one_entry_quick().
-	 *
-	 * But when we decide to straighten out git-read-tree not to
-	 * use unpack_trees() in some cases, this will probably start
-	 * to matter.
-	 */
 
 	/*
 	 * See if we have cache entry at the stage. If so,
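Note: the deleted comment recorded why the stage parameter existed at all: every caller passed stage=1, and the parameter was kept only against a speculative rework of git read-tree. This change drops the speculation and hardcodes the value at the one place it matters, the create_ce_flags(1) call above.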
@@ -493,13 +484,13 @@ static int read_tree(struct repository *r, struct tree *tree, int stage,
 	 */
 	for (i = 0; !fn && i < istate->cache_nr; i++) {
 		const struct cache_entry *ce = istate->cache[i];
-		if (ce_stage(ce) == stage)
+		if (ce_stage(ce) == 1)
 			fn = read_one_entry;
 	}
 
 	if (!fn)
 		fn = read_one_entry_quick;
-	err = read_tree_recursive(r, tree, "", 0, stage, match, fn, istate);
+	err = read_tree_recursive(r, tree, "", 0, 0, match, fn, istate);
 	if (fn == read_one_entry || err)
 		return err;
 
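Note: the literal 0 passed to read_tree_recursive() here replaces the old stage argument. Since neither read_one_entry() nor read_one_entry_quick() forwards that value any longer (see the hunks above), any literal would do; 0 simply signals that the parameter is now inert at this call site.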
@@ -549,7 +540,7 @@ void overlay_tree_on_index(struct index_state *istate,
 					       PATHSPEC_PREFER_CWD, prefix, matchbuf);
 	} else
 		memset(&pathspec, 0, sizeof(pathspec));
-	if (read_tree(the_repository, tree, 1, &pathspec, istate))
+	if (read_tree(the_repository, tree, &pathspec, istate))
 		die("unable to read tree entries %s", tree_name);
 
 	for (i = 0; i < istate->cache_nr; i++) {