
Commit 9614ad3

avar authored and gitster committed
ls-files: refactor away read_tree()
Refactor away the read_tree() function into its only user, overlay_tree_on_index(). First, change read_one_entry_opt() to use the strbuf parameter that read_tree_recursive() passes down, in place of the separate base/baselen pair. This finishes up a partial refactoring started in 6a0b0b6 (tree.c: update read_tree_recursive callback to pass strbuf as base, 2014-11-30).

Moving the rest into overlay_tree_on_index() makes the index juggling we're doing there easier to read.

Signed-off-by: Ævar Arnfjörð Bjarmason <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
1 parent fcc7c12 · commit 9614ad3
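The signature change this commit finishes is visible in the first two hunks below: read_one_entry_opt() now receives the walk's accumulated path as the strbuf that read_tree_recursive() already passes to its callbacks, instead of a detached base/baselen pair. Here is a minimal standalone sketch of the splicing it does, using a simplified stand-in for git's strbuf (not the real API) so the example compiles on its own:

/*
 * Standalone sketch, not git code: a simplified strbuf stand-in
 * showing how read_one_entry_opt() splices the accumulated base
 * path and the entry name into one buffer sized up front.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sketch_strbuf {
	const char *buf;
	size_t len;
};

static char *join_path(const struct sketch_strbuf *base, const char *pathname)
{
	size_t len = strlen(pathname);
	char *name = malloc(base->len + len + 1); /* base + name + NUL */

	if (!name)
		return NULL;
	memcpy(name, base->buf, base->len);          /* copy "builtin/"          */
	memcpy(name + base->len, pathname, len + 1); /* append "ls-files.c" + NUL */
	return name;
}

int main(void)
{
	struct sketch_strbuf base = { "builtin/", strlen("builtin/") };
	char *name = join_path(&base, "ls-files.c");

	if (name)
		printf("%s\n", name); /* prints: builtin/ls-files.c */
	free(name);
	return 0;
}

In the real function, the same two memcpy() calls land the joined path directly in the name flex array of a cache entry sized by make_empty_cache_entry(istate, base->len + len).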


builtin/ls-files.c

Lines changed: 34 additions & 43 deletions

@@ -423,7 +423,7 @@ static int get_common_prefix_len(const char *common_prefix)
 
 static int read_one_entry_opt(struct index_state *istate,
 			      const struct object_id *oid,
-			      const char *base, int baselen,
+			      struct strbuf *base,
 			      const char *pathname,
 			      unsigned mode, int opt)
 {
@@ -434,13 +434,13 @@ static int read_one_entry_opt(struct index_state *istate,
 		return READ_TREE_RECURSIVE;
 
 	len = strlen(pathname);
-	ce = make_empty_cache_entry(istate, baselen + len);
+	ce = make_empty_cache_entry(istate, base->len + len);
 
 	ce->ce_mode = create_ce_mode(mode);
 	ce->ce_flags = create_ce_flags(1);
-	ce->ce_namelen = baselen + len;
-	memcpy(ce->name, base, baselen);
-	memcpy(ce->name + baselen, pathname, len+1);
+	ce->ce_namelen = base->len + len;
+	memcpy(ce->name, base->buf, base->len);
+	memcpy(ce->name + base->len, pathname, len+1);
 	oidcpy(&ce->oid, oid);
 	return add_index_entry(istate, ce, opt);
 }
@@ -450,7 +450,7 @@ static int read_one_entry(const struct object_id *oid, struct strbuf *base,
 			  void *context)
 {
 	struct index_state *istate = context;
-	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
+	return read_one_entry_opt(istate, oid, base, pathname,
 				  mode,
 				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
 }
@@ -464,42 +464,8 @@ static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
 				void *context)
 {
 	struct index_state *istate = context;
-	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
-				  mode,
-				  ADD_CACHE_JUST_APPEND);
-}
-
-
-static int read_tree(struct repository *r, struct tree *tree,
-		     struct pathspec *match, struct index_state *istate)
-{
-	read_tree_fn_t fn = NULL;
-	int i, err;
-
-
-	/*
-	 * See if we have cache entry at the stage. If so,
-	 * do it the original slow way, otherwise, append and then
-	 * sort at the end.
-	 */
-	for (i = 0; !fn && i < istate->cache_nr; i++) {
-		const struct cache_entry *ce = istate->cache[i];
-		if (ce_stage(ce) == 1)
-			fn = read_one_entry;
-	}
-
-	if (!fn)
-		fn = read_one_entry_quick;
-	err = read_tree_recursive(r, tree, "", 0, 0, match, fn, istate);
-	if (fn == read_one_entry || err)
-		return err;
-
-	/*
-	 * Sort the cache entry -- we need to nuke the cache tree, though.
-	 */
-	cache_tree_free(&istate->cache_tree);
-	QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
-	return 0;
+	return read_one_entry_opt(istate, oid, base, pathname,
+				  mode, ADD_CACHE_JUST_APPEND);
 }
 
 /*
@@ -518,6 +484,8 @@ void overlay_tree_on_index(struct index_state *istate,
 	struct pathspec pathspec;
 	struct cache_entry *last_stage0 = NULL;
 	int i;
+	read_tree_fn_t fn = NULL;
+	int err;
 
 	if (get_oid(tree_name, &oid))
 		die("tree-ish %s not found.", tree_name);
@@ -540,9 +508,32 @@ void overlay_tree_on_index(struct index_state *istate,
 			       PATHSPEC_PREFER_CWD, prefix, matchbuf);
 	} else
 		memset(&pathspec, 0, sizeof(pathspec));
-	if (read_tree(the_repository, tree, &pathspec, istate))
+
+	/*
+	 * See if we have cache entry at the stage. If so,
+	 * do it the original slow way, otherwise, append and then
+	 * sort at the end.
+	 */
+	for (i = 0; !fn && i < istate->cache_nr; i++) {
+		const struct cache_entry *ce = istate->cache[i];
+		if (ce_stage(ce) == 1)
+			fn = read_one_entry;
+	}
+
+	if (!fn)
+		fn = read_one_entry_quick;
+	err = read_tree_recursive(the_repository, tree, "", 0, 1, &pathspec, fn, istate);
+	if (err)
 		die("unable to read tree entries %s", tree_name);
 
+	/*
+	 * Sort the cache entry -- we need to nuke the cache tree, though.
+	 */
+	if (fn == read_one_entry_quick) {
+		cache_tree_free(&istate->cache_tree);
+		QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
+	}
+
 	for (i = 0; i < istate->cache_nr; i++) {
 		struct cache_entry *ce = istate->cache[i];
 		switch (ce_stage(ce)) {
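One behavioral detail survives the move intact: the append-then-sort optimization spelled out in the inlined comments. When no stage-1 entries exist, read_one_entry_quick() appends each tree entry with ADD_CACHE_JUST_APPEND and the index is sorted once at the end, after freeing the now-stale cache tree; the new fn == read_one_entry_quick guard keeps that final sort off the slow path, which maintains index order as it inserts. Below is a standalone sketch (not git code) of the idea: append in arrival order and pay for ordering once with qsort(3) rather than on every insert.

/* Standalone sketch, not git code: append everything, sort once. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_name(const void *a, const void *b)
{
	return strcmp(*(const char *const *)a, *(const char *const *)b);
}

int main(void)
{
	/* entries in whatever order they arrive from the walk */
	const char *names[] = { "t/helper", "Makefile", "builtin/ls-files.c" };
	size_t n = sizeof(names) / sizeof(names[0]);

	/* the ADD_CACHE_JUST_APPEND analogue: no per-entry ordering work,
	 * just one sort at the very end */
	qsort(names, n, sizeof(names[0]), cmp_name);

	for (size_t i = 0; i < n; i++)
		puts(names[i]);
	return 0;
}

For N entries this does O(N log N) comparisons in total, whereas inserting each entry into its sorted position in a flat array, as the "original slow way" must when stage-1 entries are present, costs roughly O(N^2) in memmove traffic.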
