2 * Copyright (C) 2005 Junio C Hamano
10 #include "cache-tree.h"
11 #include "unpack-trees.h"
19 * Has the work tree entity been removed?
21 * Return 1 if it was removed from the work tree, 0 if an entity to be
22 * compared with the cache entry ce still exists (the latter includes
23 * the case where a directory that is not a submodule repository
24 * exists for ce that is a submodule -- it is a submodule that is not
25 * checked out). Return negative for an error.
/*
 * NOTE(review): this chunk is missing original source lines (braces,
 * return statements); the tokens below are kept verbatim.
 */
27 static int check_removed(const struct cache_entry
*ce
, struct stat
*st
)
/* lstat() the path recorded in the cache entry into *st. */
29 if (lstat(ce
->name
, st
) < 0) {
/* Only ENOENT/ENOTDIR mean "gone"; any other errno is a real error. */
30 if (errno
!= ENOENT
&& errno
!= ENOTDIR
)
/*
 * A leading symlink in the path means the entry itself is effectively
 * removed even though lstat() may have resolved something.
 */
34 if (has_symlink_leading_path(ce
->name
, ce_namelen(ce
)))
36 if (S_ISDIR(st
->st_mode
)) {
/* 20 bytes: raw SHA-1 of the submodule's HEAD, if any. */
37 unsigned char sub
[20];
40 * If ce is already a gitlink, we can have a plain
41 * directory (i.e. the submodule is not checked out),
42 * or a checked out submodule. Either case this is not
43 * a case where something was removed from the work tree,
44 * so we will return 0.
46 * Otherwise, if the directory is not a submodule
47 * repository, that means ce which was a blob turned into
48 * a directory --- the blob was removed!
50 if (!S_ISGITLINK(ce
->ce_mode
) &&
51 resolve_gitlink_ref(ce
->name
, "HEAD", sub
))
/*
 * run_diff_files(): diff the index against the working tree, queueing
 * the results into revs->diffopt and flushing them at the end.
 * "option" carries DIFF_SILENT_ON_REMOVED / DIFF_RACY_IS_MODIFIED bits.
 * NOTE(review): many original lines are missing from this chunk
 * (braces, continues, error paths); tokens below are kept verbatim.
 */
57 int run_diff_files(struct rev_info
*revs
, unsigned int option
)
/* revs->max_count doubles as the unmerged stage selector here. */
60 int diff_unmerged_stage
= revs
->max_count
;
61 int silent_on_removed
= option
& DIFF_SILENT_ON_REMOVED
;
62 unsigned ce_option
= ((option
& DIFF_RACY_IS_MODIFIED
)
63 ? CE_MATCH_RACY_IS_DIRTY
: 0);
65 diff_set_mnemonic_prefix(&revs
->diffopt
, "i/", "w/");
/* Default to comparing against stage #2 ("ours") when unset. */
67 if (diff_unmerged_stage
< 0)
68 diff_unmerged_stage
= 2;
/* Walk every entry in the active cache (the index). */
70 for (i
= 0; i
< entries
; i
++) {
72 unsigned int oldmode
, newmode
;
73 struct cache_entry
*ce
= active_cache
[i
];
76 if (diff_can_quit_early(&revs
->diffopt
))
79 if (!ce_path_match(ce
, revs
->prune_data
))
/* Unmerged entry: build a combine_diff_path covering all stages. */
83 struct combine_diff_path
*dpath
;
84 int num_compare_stages
= 0;
87 path_len
= ce_namelen(ce
);
/* Room for 5 parents; path storage lives right after parent[5]. */
89 dpath
= xmalloc(combine_diff_path_size(5, path_len
));
90 dpath
->path
= (char *) &(dpath
->parent
[5]);
93 dpath
->len
= path_len
;
94 memcpy(dpath
->path
, ce
->name
, path_len
);
95 dpath
->path
[path_len
] = '\0';
97 memset(&(dpath
->parent
[0]), 0,
98 sizeof(struct combine_diff_parent
)*5);
100 changed
= check_removed(ce
, &st
);
102 dpath
->mode
= ce_mode_from_stat(ce
, st
.st_mode
);
108 if (silent_on_removed
)
/* Collect all index entries that share this path (the stages). */
112 while (i
< entries
) {
113 struct cache_entry
*nce
= active_cache
[i
];
116 if (strcmp(ce
->name
, nce
->name
))
119 /* Stage #2 (ours) is the first parent,
120 * stage #3 (theirs) is the second.
122 stage
= ce_stage(nce
);
124 int mode
= nce
->ce_mode
;
125 num_compare_stages
++;
126 hashcpy(dpath
->parent
[stage
-2].sha1
, nce
->sha1
);
127 dpath
->parent
[stage
-2].mode
= ce_mode_from_stat(nce
, mode
);
128 dpath
->parent
[stage
-2].status
=
129 DIFF_STATUS_MODIFIED
;
132 /* diff against the proper unmerged stage */
133 if (stage
== diff_unmerged_stage
)
138 * Compensate for loop update
/* Both stage #2 and #3 present: show a combined (merge) diff. */
142 if (revs
->combine_merges
&& num_compare_stages
== 2) {
143 show_combined_diff(dpath
, 2,
144 revs
->dense_combined_merges
,
153 * Show the diff for the 'ce' if we found the one
154 * from the desired stage.
156 diff_unmerge(&revs
->diffopt
, ce
->name
, 0, null_sha1
);
157 if (ce_stage(ce
) != diff_unmerged_stage
)
/* Normal (merged) entry: compare index entry with the work tree. */
164 changed
= check_removed(ce
, &st
);
170 if (silent_on_removed
)
172 diff_addremove(&revs
->diffopt
, '-', ce
->ce_mode
,
176 changed
= ce_match_stat(ce
, &st
, ce_option
);
/* Unchanged on disk: remember that to skip future lstat()s. */
178 ce_mark_uptodate(ce
);
179 if (!DIFF_OPT_TST(&revs
->diffopt
, FIND_COPIES_HARDER
))
182 oldmode
= ce
->ce_mode
;
183 newmode
= ce_mode_from_stat(ce
, st
.st_mode
);
/* null_sha1 for the new side when content changed (unknown blob). */
184 diff_change(&revs
->diffopt
, oldmode
, newmode
,
185 ce
->sha1
, (changed
? null_sha1
: ce
->sha1
),
189 diffcore_std(&revs
->diffopt
);
190 diff_flush(&revs
->diffopt
);
198 /* A file entry went away or appeared */
/*
 * diff_index_show_file(): report a one-sided entry (creation or
 * deletion) via diff_addremove(); prefix[0] is '+' or '-'.
 * NOTE(review): trailing argument lines are missing from this chunk.
 */
199 static void diff_index_show_file(struct rev_info
*revs
,
201 struct cache_entry
*ce
,
202 const unsigned char *sha1
, unsigned int mode
)
204 diff_addremove(&revs
->diffopt
, prefix
[0], mode
,
/*
 * get_stat_data(): report the sha1/mode to use for "ce" on the
 * work-tree side of the diff, consulting the filesystem unless
 * "cached" is set or the entry is already known up to date.
 * NOTE(review): several original lines (error handling, returns) are
 * missing from this chunk; tokens below are kept verbatim.
 */
208 static int get_stat_data(struct cache_entry
*ce
,
209 const unsigned char **sha1p
,
211 int cached
, int match_missing
)
/* Default to what the index records. */
213 const unsigned char *sha1
= ce
->sha1
;
214 unsigned int mode
= ce
->ce_mode
;
216 if (!cached
&& !ce_uptodate(ce
)) {
219 changed
= check_removed(ce
, &st
);
230 changed
= ce_match_stat(ce
, &st
, 0);
/* Work-tree content differs: take the mode from stat data. */
232 mode
= ce_mode_from_stat(ce
, st
.st_mode
);
/*
 * show_new_file(): emit a creation event ("+") for an index entry that
 * has no counterpart in the tree being compared against.
 * NOTE(review): some original lines are missing from this chunk.
 */
242 static void show_new_file(struct rev_info
*revs
,
243 struct cache_entry
*new,
244 int cached
, int match_missing
)
246 const unsigned char *sha1
;
250 * New file in the index: it might actually be different in
/* Resolve actual sha1/mode from the work tree (or index if cached). */
253 if (get_stat_data(new, &sha1
, &mode
, cached
, match_missing
) < 0)
256 diff_index_show_file(revs
, "+", new, sha1
, mode
);
/*
 * show_modified(): compare the tree-side entry "old" against the
 * index-side entry "new" and emit a change, deletion, or (for merges
 * with combine_merges) a combined diff.
 * NOTE(review): many original lines are missing from this chunk
 * (braces, returns); tokens below are kept verbatim.
 */
259 static int show_modified(struct rev_info
*revs
,
260 struct cache_entry
*old
,
261 struct cache_entry
*new,
263 int cached
, int match_missing
)
265 unsigned int mode
, oldmode
;
266 const unsigned char *sha1
;
/* New side is gone from the work tree: report a deletion instead. */
268 if (get_stat_data(new, &sha1
, &mode
, cached
, match_missing
) < 0) {
270 diff_index_show_file(revs
, "-", old
,
271 old
->sha1
, old
->ce_mode
);
/*
 * Work-tree content differs from both parents: present it as a
 * two-parent combined diff (old tree and new index as parents).
 */
275 if (revs
->combine_merges
&& !cached
&&
276 (hashcmp(sha1
, old
->sha1
) || hashcmp(old
->sha1
, new->sha1
))) {
277 struct combine_diff_path
*p
;
278 int pathlen
= ce_namelen(new);
/* Path storage lives immediately after parent[2]. */
280 p
= xmalloc(combine_diff_path_size(2, pathlen
));
281 p
->path
= (char *) &p
->parent
[2];
284 memcpy(p
->path
, new->name
, pathlen
);
285 p
->path
[pathlen
] = 0;
288 memset(p
->parent
, 0, 2 * sizeof(struct combine_diff_parent
));
/* Parent 0: the index ("new") side. */
289 p
->parent
[0].status
= DIFF_STATUS_MODIFIED
;
290 p
->parent
[0].mode
= new->ce_mode
;
291 hashcpy(p
->parent
[0].sha1
, new->sha1
);
/* Parent 1: the tree ("old") side. */
292 p
->parent
[1].status
= DIFF_STATUS_MODIFIED
;
293 p
->parent
[1].mode
= old
->ce_mode
;
294 hashcpy(p
->parent
[1].sha1
, old
->sha1
);
295 show_combined_diff(p
, 2, revs
->dense_combined_merges
, revs
);
300 oldmode
= old
->ce_mode
;
/* Identical mode and content, and no copy detection: nothing to show. */
301 if (mode
== oldmode
&& !hashcmp(sha1
, old
->sha1
) &&
302 !DIFF_OPT_TST(&revs
->diffopt
, FIND_COPIES_HARDER
))
305 diff_change(&revs
->diffopt
, oldmode
, mode
,
306 old
->sha1
, sha1
, old
->name
);
311 * This turns all merge entries into "stage 3". That guarantees that
312 * when we read in the new tree (into "stage 1"), we won't lose sight
313 * of the fact that we had unmerged entries.
/* NOTE(review): guard lines are missing from this chunk. */
315 static void mark_merge_entries(void)
318 for (i
= 0; i
< active_nr
; i
++) {
319 struct cache_entry
*ce
= active_cache
[i
];
/* Force the entry to the highest stage via the stage-mask bits. */
322 ce
->ce_flags
|= CE_STAGEMASK
;
327 * This gets a mix of an existing index and a tree, one pathname entry
328 * at a time. The index entry may be a single stage-0 one, but it could
329 * also be multiple unmerged entries (in which case idx_pos/idx_nr will
330 * give you the position and number of entries in the index).
/*
 * NOTE(review): several original lines (returns, null checks) are
 * missing from this chunk; tokens below are kept verbatim.
 */
332 static void do_oneway_diff(struct unpack_trees_options
*o
,
333 struct cache_entry
*idx
,
334 struct cache_entry
*tree
)
/* The rev_info was stashed in the unpack options by the caller. */
336 struct rev_info
*revs
= o
->unpack_data
;
337 int match_missing
, cached
;
340 * Backward compatibility wart - "diff-index -m" does
341 * not mean "do not ignore merges", but "match_missing".
343 * But with the revision flag parsing, that's found in
344 * "!revs->ignore_merges".
346 cached
= o
->index_only
;
347 match_missing
= !revs
->ignore_merges
;
/* Unmerged index entry in cached mode: report it as unmerged. */
349 if (cached
&& idx
&& ce_stage(idx
)) {
351 diff_unmerge(&revs
->diffopt
, idx
->name
, idx
->ce_mode
, idx
->sha1
);
356 * Something added to the tree?
359 show_new_file(revs
, idx
, cached
, match_missing
);
364 * Something removed from the tree?
367 diff_index_show_file(revs
, "-", tree
, tree
->sha1
, tree
->ce_mode
);
371 /* Show difference between old and new */
372 show_modified(revs
, tree
, idx
, 1, cached
, match_missing
);
/*
 * skip_same_name(): advance o->pos past every index entry that has
 * exactly the same pathname as "ce" (i.e. the remaining unmerged
 * stages of the same path).
 * NOTE(review): loop-body break/increment lines are missing from this
 * chunk; tokens below are kept verbatim.
 */
375 static inline void skip_same_name(struct cache_entry
*ce
, struct unpack_trees_options
*o
)
377 int len
= ce_namelen(ce
);
378 const struct index_state
*index
= o
->src_index
;
380 while (o
->pos
< index
->cache_nr
) {
381 struct cache_entry
*next
= index
->cache
[o
->pos
];
/* Cheap length check first, then the actual name comparison. */
382 if (len
!= ce_namelen(next
))
384 if (memcmp(ce
->name
, next
->name
, len
))
391 * The unpack_trees() interface is designed for merging, so
392 * the different source entries are designed primarily for
393 * the source trees, with the old index being really mainly
394 * used for being replaced by the result.
396 * For diffing, the index is more important, and we only have a
399 * We're supposed to return how many index entries we want to skip.
401 * This wrapper makes it all more readable, and takes care of all
402 * the fairly complex unpack_trees() semantic requirements, including
403 * the skipping, the path matching, the type conflict cases etc.
/*
 * NOTE(review): return statements are missing from this chunk; tokens
 * below are kept verbatim.
 */
405 static int oneway_diff(struct cache_entry
**src
, struct unpack_trees_options
*o
)
/* src[0] is the index side, src[1] the tree side of the walk. */
407 struct cache_entry
*idx
= src
[0];
408 struct cache_entry
*tree
= src
[1];
409 struct rev_info
*revs
= o
->unpack_data
;
/* Unmerged path: consume all of its stages in one go. */
411 if (idx
&& ce_stage(idx
))
412 skip_same_name(idx
, o
);
415 * Unpack-trees generates a DF/conflict entry if
416 * there was a directory in the index and a tree
417 * in the tree. From a diff standpoint, that's a
418 * delete of the tree and a create of the file.
420 if (tree
== o
->df_conflict_entry
)
/* Only diff paths that survive the pathspec pruning. */
423 if (ce_path_match(idx
? idx
: tree
, revs
->prune_data
))
424 do_oneway_diff(o
, idx
, tree
);
/*
 * run_diff_index(): diff the index against the (single) tree queued in
 * revs->pending, using unpack_trees() with oneway_diff as the callback.
 * "cached" suppresses work-tree lstat()s (diff-index --cached).
 * NOTE(review): several original lines (declarations, returns) are
 * missing from this chunk; tokens below are kept verbatim.
 */
429 int run_diff_index(struct rev_info
*revs
, int cached
)
433 const char *tree_name
;
434 struct unpack_trees_options opts
;
/* Push unmerged entries to stage 3 so the tree read can't hide them. */
437 mark_merge_entries();
439 ent
= revs
->pending
.objects
[0].item
;
440 tree_name
= revs
->pending
.objects
[0].name
;
441 tree
= parse_tree_indirect(ent
->sha1
);
443 return error("bad tree object %s", tree_name
);
445 memset(&opts
, 0, sizeof(opts
));
447 opts
.index_only
= cached
;
/* Cached diff can trust cache-tree unless copy detection needs blobs. */
448 opts
.diff_index_cached
= (cached
&&
449 !DIFF_OPT_TST(&revs
->diffopt
, FIND_COPIES_HARDER
));
451 opts
.fn
= oneway_diff
;
452 opts
.unpack_data
= revs
;
453 opts
.src_index
= &the_index
;
/* Read-only walk: do not write a result index. */
454 opts
.dst_index
= NULL
;
456 init_tree_desc(&t
, tree
->buffer
, tree
->size
);
457 if (unpack_trees(1, &t
, &opts
))
460 diff_set_mnemonic_prefix(&revs
->diffopt
, "c/", cached
? "i/" : "w/");
461 diffcore_std(&revs
->diffopt
);
462 diff_flush(&revs
->diffopt
);
/*
 * do_diff_cache(): internal diff-cache entry point (used e.g. by
 * git-blame).  Strips stale higher-stage entries left by a previous
 * round, then unpack_trees()-walks tree_sha1 against the index.
 * NOTE(review): several original lines (compaction loop body, returns)
 * are missing from this chunk; tokens below are kept verbatim.
 */
466 int do_diff_cache(const unsigned char *tree_sha1
, struct diff_options
*opt
)
469 struct rev_info revs
;
471 struct cache_entry
**dst
;
472 struct cache_entry
*last
= NULL
;
473 struct unpack_trees_options opts
;
477 * This is used by git-blame to run diff-cache internally;
478 * it potentially needs to repeatedly run this, so we will
479 * start by removing the higher order entries the last round
/* Compact the index, dropping duplicate (higher-stage) path entries. */
483 for (i
= 0; i
< active_nr
; i
++) {
484 struct cache_entry
*ce
= active_cache
[i
];
486 if (last
&& !strcmp(ce
->name
, last
->name
))
488 cache_tree_invalidate_path(active_cache_tree
,
491 ce
->ce_flags
|= CE_REMOVE
;
495 active_nr
= dst
- active_cache
;
497 init_revisions(&revs
, NULL
);
498 revs
.prune_data
= opt
->paths
;
499 tree
= parse_tree_indirect(tree_sha1
);
501 die("bad tree object %s", sha1_to_hex(tree_sha1
));
503 memset(&opts
, 0, sizeof(opts
));
506 opts
.diff_index_cached
= !DIFF_OPT_TST(opt
, FIND_COPIES_HARDER
);
508 opts
.fn
= oneway_diff
;
509 opts
.unpack_data
= &revs
;
510 opts
.src_index
= &the_index
;
/* Unlike run_diff_index(), the result is written back to the index. */
511 opts
.dst_index
= &the_index
;
513 init_tree_desc(&t
, tree
->buffer
, tree
->size
);
514 if (unpack_trees(1, &t
, &opts
))
/*
 * index_differs_from(): return 1 if the index differs from the
 * revision named by "def" (e.g. "HEAD"), 0 otherwise.  Runs a quick
 * cached diff-index and inspects the HAS_CHANGES flag.
 * NOTE(review): declaration and closing lines are missing from this
 * chunk; tokens below are kept verbatim.
 */
519 int index_differs_from(const char *def
, int diff_flags
)
523 init_revisions(&rev
, NULL
);
524 setup_revisions(0, NULL
, &rev
, def
);
/* QUICK + EXIT_WITH_STATUS: stop at the first change found. */
525 DIFF_OPT_SET(&rev
.diffopt
, QUICK
);
526 DIFF_OPT_SET(&rev
.diffopt
, EXIT_WITH_STATUS
);
527 rev
.diffopt
.flags
|= diff_flags
;
528 run_diff_index(&rev
, 1);
/* Release the pending-object array allocated by setup_revisions(). */
529 if (rev
.pending
.alloc
)
530 free(rev
.pending
.objects
);
531 return (DIFF_OPT_TST(&rev
.diffopt
, HAS_CHANGES
) != 0);