Optimize "diff --cached" performance.
The read_tree() function is called only from the call chain that
runs "git diff --cached" (this includes the internal call made by
git-runstatus to run_diff_index()).  The function reads the tree
into a vacant stage of the index without any funky "merge" magic.
The caller then goes and compares the stage #1 entries from the
tree with the stage #0 entries from the original index.
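
To illustrate what that comparison looks like, here is a standalone
sketch with made-up struct and field names (it is not the actual
diff-lib.c code): after the tree has been read into stage #1, the
active cache holds stage #0 entries from the original index and
stage #1 entries from the tree, sorted by name, and the diff walks
the array pairing up entries of the same path across the two stages.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for a cache entry; real entries carry much more. */
struct toy_entry {
	const char *name;
	int stage;		/* 0 = original index, 1 = tree */
	const char *blob;	/* stand-in for the object name */
};

/* Report paths whose stage #0 and stage #1 blobs differ. */
static void report_changes(const struct toy_entry *ce, int nr)
{
	int i;

	for (i = 0; i + 1 < nr; i++) {
		if (ce[i].stage != 0 || ce[i + 1].stage != 1)
			continue;
		if (strcmp(ce[i].name, ce[i + 1].name))
			continue;
		if (strcmp(ce[i].blob, ce[i + 1].blob))
			printf("M\t%s\n", ce[i].name);
	}
}

int main(void)
{
	const struct toy_entry cache[] = {
		{ "Makefile", 0, "a1" }, { "Makefile", 1, "a1" },
		{ "tree.c",   0, "b7" }, { "tree.c",   1, "b2" },
	};

	report_changes(cache, 4);	/* prints "M	tree.c" */
	return 0;
}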

When adding the cache entries this way, read_tree() used the
general-purpose add_cache_entry().  That function looks for an
existing entry to replace (or, if there is none, for the position
at which to insert the new entry), resolves D/F conflicts, and so
on.

For the purpose of reading entries into an empty stage, none of
that processing is needed.  We can instead append everything and
then sort the result at the end.
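
A minimal sketch of the append-then-sort idea in plain C (generic
code, not git's): appending is constant time per entry and a single
qsort() at the end costs O(n log n), whereas keeping the array
sorted while inserting needs a memmove per entry, O(n^2) overall.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_str(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

int main(void)
{
	const char *paths[] = { "tree.c", "cache.h", "read-cache.c" };
	const char *cache[3];
	size_t i, nr = 0;

	for (i = 0; i < 3; i++)
		cache[nr++] = paths[i];		/* append only; no searching */
	qsort(cache, nr, sizeof(cache[0]), cmp_str);	/* one sort at the end */

	for (i = 0; i < nr; i++)
		printf("%s\n", cache[i]);	/* cache.h, read-cache.c, tree.c */
	return 0;
}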

This commit changes read_tree() to first make sure that there are
no existing cache entries at the specified stage; if that is the
case, it runs add_cache_entry() with the new ADD_CACHE_JUST_APPEND
flag and then sorts the resulting cache using qsort().

This new flag tells add_cache_entry() to omit all the checks,
such as "Does this path already exist?  Does adding this path
remove other existing entries because it turns a directory into a
file?", and instead append the given cache entry straight at the
end of the active cache.  The caller, of course, is expected to
sort the resulting cache at the end before using the result.
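
For illustration only (this is not code from the commit), the kind
of directory/file conflict test that the append-only path gets to
skip: adding "foo" as a file has to be rejected, or has to evict an
existing "foo/bar", because "foo" can no longer be a directory.

#include <stdio.h>
#include <string.h>

/* Would adding "path" as a file conflict with an existing entry? */
static int df_conflict(const char *path, const char *existing)
{
	size_t len = strlen(path);

	return !strncmp(existing, path, len) && existing[len] == '/';
}

int main(void)
{
	printf("%d\n", df_conflict("foo", "foo/bar"));	/* 1: conflict */
	printf("%d\n", df_conflict("foo", "football"));	/* 0: fine */
	return 0;
}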

Signed-off-by: Junio C Hamano <gitster@pobox.com>
gitster committed Aug 10, 2007
1 parent 63c21c4 commit af3785d
Showing 3 changed files with 85 additions and 5 deletions.
1 change: 1 addition & 0 deletions cache.h
@@ -258,6 +258,7 @@ extern int index_name_pos(struct index_state *, const char *name, int namelen);
#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
#define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
#define ADD_CACHE_JUST_APPEND 8 /* Append only; tree.c::read_tree() */
extern int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
extern struct cache_entry *refresh_cache_entry(struct cache_entry *ce, int really);
extern int remove_index_entry_at(struct index_state *, int pos);
20 changes: 18 additions & 2 deletions read-cache.c
@@ -665,7 +665,7 @@ static int check_file_directory_conflict(struct index_state *istate,
return retval + has_dir_name(istate, ce, pos, ok_to_replace);
}

int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
{
int pos;
int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
@@ -707,6 +707,22 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti
pos = index_name_pos(istate, ce->name, ntohs(ce->ce_flags));
pos = -pos-1;
}
return pos + 1;
}

int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
{
int pos;

if (option & ADD_CACHE_JUST_APPEND)
pos = istate->cache_nr;
else {
int ret;
ret = add_index_entry_with_check(istate, ce, option);
if (ret <= 0)
return ret;
pos = ret - 1;
}

/* Make sure the array is big enough .. */
if (istate->cache_nr == istate->cache_alloc) {
@@ -717,7 +733,7 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti

/* Add it in.. */
istate->cache_nr++;
if (istate->cache_nr > pos)
if (istate->cache_nr > pos + 1)
memmove(istate->cache + pos + 1,
istate->cache + pos,
(istate->cache_nr - pos - 1) * sizeof(ce));
69 changes: 66 additions & 3 deletions tree.c
@@ -1,4 +1,5 @@
#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
@@ -7,7 +8,7 @@

const char *tree_type = "tree";

static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
{
int len;
unsigned int size;
@@ -25,7 +26,23 @@ static int read_one_entry(const unsigned char *sha1, const char *base, int basel
memcpy(ce->name, base, baselen);
memcpy(ce->name + baselen, pathname, len+1);
hashcpy(ce->sha1, sha1);
return add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
return add_cache_entry(ce, opt);
}

static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
{
return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
* This is used when the caller knows there is no existing entries at
* the stage that will conflict with the entry being added.
*/
static int read_one_entry_quick(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
{
return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
ADD_CACHE_JUST_APPEND);
}

static int match_tree_entry(const char *base, int baselen, const char *path, unsigned int mode, const char **paths)
Expand Down Expand Up @@ -119,9 +136,55 @@ int read_tree_recursive(struct tree *tree,
return 0;
}

static int cmp_cache_name_compare(const void *a_, const void *b_)
{
const struct cache_entry *ce1, *ce2;

ce1 = *((const struct cache_entry **)a_);
ce2 = *((const struct cache_entry **)b_);
return cache_name_compare(ce1->name, ntohs(ce1->ce_flags),
ce2->name, ntohs(ce2->ce_flags));
}

int read_tree(struct tree *tree, int stage, const char **match)
{
return read_tree_recursive(tree, "", 0, stage, match, read_one_entry);
read_tree_fn_t fn = NULL;
int i, err;

/*
* Currently the only existing callers of this function all
* call it with stage=1 and after making sure there is nothing
* at that stage; we could always use read_one_entry_quick().
*
* But when we decide to straighten out git-read-tree not to
* use unpack_trees() in some cases, this will probably start
* to matter.
*/

/*
* See if we have cache entry at the stage. If so,
* do it the original slow way, otherwise, append and then
* sort at the end.
*/
for (i = 0; !fn && i < active_nr; i++) {
struct cache_entry *ce = active_cache[i];
if (ce_stage(ce) == stage)
fn = read_one_entry;
}

if (!fn)
fn = read_one_entry_quick;
err = read_tree_recursive(tree, "", 0, stage, match, fn);
if (fn == read_one_entry || err)
return err;

/*
* Sort the cache entry -- we need to nuke the cache tree, though.
*/
cache_tree_free(&active_cache_tree);
qsort(active_cache, active_nr, sizeof(active_cache[0]),
cmp_cache_name_compare);
return 0;
}

struct tree *lookup_tree(const unsigned char *sha1)
