/*
 * Copyright (C) 2005 Junio C Hamano
 */
#include "cache.h"
#include "diff.h"
#include "diffcore.h"
#include "revision.h"
#include "cache-tree.h"
#include "unpack-trees.h"
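/*
 * Library-side helpers behind "git diff-files" and "git diff-index":
 * run_diff_files() compares the index with the work tree, while
 * run_diff_index() and do_diff_cache() compare a tree with the index
 * (run_diff_index() also consults the work tree unless cached).
 */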
/*
 * Has the work tree entity been removed?
 *
 * Return 1 if it was removed from the work tree, 0 if an entity to be
 * compared with the cache entry ce still exists (the latter includes
 * the case where a directory that is not a submodule repository
 * exists for ce that is a submodule -- it is a submodule that is not
 * checked out).  Return negative for an error.
 */
static int check_removed(const struct cache_entry *ce, struct stat *st)
{
	if (lstat(ce->name, st) < 0) {
		if (errno != ENOENT && errno != ENOTDIR)
			return -1;
		return 1;
	}
	if (has_symlink_leading_path(ce->name, ce_namelen(ce)))
		return 1;
	if (S_ISDIR(st->st_mode)) {
		unsigned char sub[20];

		/*
		 * If ce is already a gitlink, we can have a plain
		 * directory (i.e. the submodule is not checked out),
		 * or a checked out submodule.  Either case this is not
		 * a case where something was removed from the work tree,
		 * so we will return 0.
		 *
		 * Otherwise, if the directory is not a submodule
		 * repository, that means ce which was a blob turned into
		 * a directory --- the blob was removed!
		 */
		if (!S_ISGITLINK(ce->ce_mode) &&
		    resolve_gitlink_ref(ce->name, "HEAD", sub))
			return 1;
	}
	return 0;
}
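/*
 * Compare the index with the work tree and queue the differences
 * (additions, removals, content/mode changes) in revs->diffopt.
 * Unmerged entries are shown either as a combined diff of their stages
 * or as an unmerged path, depending on revs->combine_merges.
 */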
int run_diff_files(struct rev_info *revs, unsigned int option)
{
	int entries, i;
	int diff_unmerged_stage = revs->max_count;
	int silent_on_removed = option & DIFF_SILENT_ON_REMOVED;
	unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
			      ? CE_MATCH_RACY_IS_DIRTY : 0);

	diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");

	if (diff_unmerged_stage < 0)
		diff_unmerged_stage = 2;
	entries = active_nr;
	for (i = 0; i < entries; i++) {
		struct stat st;
		unsigned int oldmode, newmode;
		struct cache_entry *ce = active_cache[i];
		int changed;

		if (DIFF_OPT_TST(&revs->diffopt, QUIET) &&
		    DIFF_OPT_TST(&revs->diffopt, HAS_CHANGES))
			break;

		if (!ce_path_match(ce, revs->prune_data))
			continue;

		if (ce_stage(ce)) {
			struct combine_diff_path *dpath;
			int num_compare_stages = 0;
			size_t path_len;

			path_len = ce_namelen(ce);

			dpath = xmalloc(combine_diff_path_size(5, path_len));
			dpath->path = (char *) &(dpath->parent[5]);

			dpath->next = NULL;
			dpath->len = path_len;
			memcpy(dpath->path, ce->name, path_len);
			dpath->path[path_len] = '\0';
			memset(&(dpath->parent[0]), 0,
			       sizeof(struct combine_diff_parent)*5);

			changed = check_removed(ce, &st);
			if (!changed)
				dpath->mode = ce_mode_from_stat(ce, st.st_mode);
			else {
				if (changed < 0) {
					perror(ce->name);
					continue;
				}
				if (silent_on_removed)
					continue;
			}

			while (i < entries) {
				struct cache_entry *nce = active_cache[i];
				int stage;

				if (strcmp(ce->name, nce->name))
					break;

				/* Stage #2 (ours) is the first parent,
				 * stage #3 (theirs) is the second.
				 */
				stage = ce_stage(nce);
				if (2 <= stage) {
					int mode = nce->ce_mode;
					num_compare_stages++;
					hashcpy(dpath->parent[stage-2].sha1, nce->sha1);
					dpath->parent[stage-2].mode = ce_mode_from_stat(nce, mode);
					dpath->parent[stage-2].status =
						DIFF_STATUS_MODIFIED;
				}

				/* diff against the proper unmerged stage */
				if (stage == diff_unmerged_stage)
					ce = nce;
				i++;
			}
			/*
			 * Compensate for loop update
			 */
			i--;

			if (revs->combine_merges && num_compare_stages == 2) {
				show_combined_diff(dpath, 2,
						   revs->dense_combined_merges,
						   revs);
				free(dpath);
				continue;
			}
			free(dpath);
			dpath = NULL;

			/*
			 * Show the diff for the 'ce' if we found the one
			 * from the desired stage.
			 */
			diff_unmerge(&revs->diffopt, ce->name, 0, null_sha1);
			if (ce_stage(ce) != diff_unmerged_stage)
				continue;
		}

		if (ce_uptodate(ce) || ce_skip_worktree(ce))
			continue;

		/* If CE_VALID is set, don't look at workdir for file removal */
		changed = (ce->ce_flags & CE_VALID) ? 0 : check_removed(ce, &st);
		if (changed) {
			if (changed < 0) {
				perror(ce->name);
				continue;
			}
			if (silent_on_removed)
				continue;
			diff_addremove(&revs->diffopt, '-', ce->ce_mode,
				       ce->sha1, ce->name);
			continue;
		}
		changed = ce_match_stat(ce, &st, ce_option);
		if (!changed) {
			ce_mark_uptodate(ce);
			if (!DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER))
				continue;
		}
		oldmode = ce->ce_mode;
		newmode = ce_mode_from_stat(ce, st.st_mode);
		diff_change(&revs->diffopt, oldmode, newmode,
			    ce->sha1, (changed ? null_sha1 : ce->sha1),
			    ce->name);
	}
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
	return 0;
}
/* A file entry went away or appeared */
static void diff_index_show_file(struct rev_info *revs,
				 const char *prefix,
				 struct cache_entry *ce,
				 const unsigned char *sha1, unsigned int mode)
{
	diff_addremove(&revs->diffopt, prefix[0], mode,
		       sha1, ce->name);
}
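/*
 * Pick the object name and mode to use for the "new" side of a comparison:
 * normally the cached values from ce, but when the work tree has to be
 * consulted, a stat-dirty file is reported as null_sha1 with its current
 * mode, and a missing file either falls back to the cached values
 * (match_missing) or makes the function return -1.
 */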
static int get_stat_data(struct cache_entry *ce,
			 const unsigned char **sha1p,
			 unsigned int *modep,
			 int cached, int match_missing)
{
	const unsigned char *sha1 = ce->sha1;
	unsigned int mode = ce->ce_mode;

	if (!cached && !ce_uptodate(ce)) {
		int changed;
		struct stat st;
		changed = check_removed(ce, &st);
		if (changed < 0)
			return -1;
		else if (changed) {
			if (match_missing) {
				*sha1p = sha1;
				*modep = mode;
				return 0;
			}
			return -1;
		}
		changed = ce_match_stat(ce, &st, 0);
		if (changed) {
			mode = ce_mode_from_stat(ce, st.st_mode);
			sha1 = null_sha1;
		}
	}

	*sha1p = sha1;
	*modep = mode;
	return 0;
}
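/*
 * A path that exists in the index but not in the tree: show it as a new
 * file, using the work tree object name and mode when the index copy is
 * not up to date.
 */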
static void show_new_file(struct rev_info *revs,
			  struct cache_entry *new,
			  int cached, int match_missing)
{
	const unsigned char *sha1;
	unsigned int mode;

	/*
	 * New file in the index: it might actually be different in
	 * the working tree.
	 */
	if (get_stat_data(new, &sha1, &mode, cached, match_missing) < 0)
		return;

	diff_index_show_file(revs, "+", new, sha1, mode);
}
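/*
 * Compare an index entry against its tree counterpart.  If combine_merges
 * is in effect and the tree, the index and the work tree do not all agree,
 * the path is shown as a two-parent combined diff (index and tree as the
 * parents); otherwise a plain modification is queued unless nothing changed.
 */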
static int show_modified(struct rev_info *revs,
			 struct cache_entry *old,
			 struct cache_entry *new,
			 int report_missing,
			 int cached, int match_missing)
{
	unsigned int mode, oldmode;
	const unsigned char *sha1;

	if (get_stat_data(new, &sha1, &mode, cached, match_missing) < 0) {
		if (report_missing)
			diff_index_show_file(revs, "-", old,
					     old->sha1, old->ce_mode);
		return -1;
	}

	if (revs->combine_merges && !cached &&
	    (hashcmp(sha1, old->sha1) || hashcmp(old->sha1, new->sha1))) {
		struct combine_diff_path *p;
		int pathlen = ce_namelen(new);

		p = xmalloc(combine_diff_path_size(2, pathlen));
		p->path = (char *) &p->parent[2];
		p->next = NULL;
		p->len = pathlen;
		memcpy(p->path, new->name, pathlen);
		p->path[pathlen] = 0;
		p->mode = mode;
		hashclr(p->sha1);
		memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
		p->parent[0].status = DIFF_STATUS_MODIFIED;
		p->parent[0].mode = new->ce_mode;
		hashcpy(p->parent[0].sha1, new->sha1);
		p->parent[1].status = DIFF_STATUS_MODIFIED;
		p->parent[1].mode = old->ce_mode;
		hashcpy(p->parent[1].sha1, old->sha1);
		show_combined_diff(p, 2, revs->dense_combined_merges, revs);
		free(p);
		return 0;
	}

	oldmode = old->ce_mode;
	if (mode == oldmode && !hashcmp(sha1, old->sha1) &&
	    !DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER))
		return 0;

	diff_change(&revs->diffopt, oldmode, mode,
		    old->sha1, sha1, old->name);
	return 0;
}
/*
 * This turns all merge entries into "stage 3". That guarantees that
 * when we read in the new tree (into "stage 1"), we won't lose sight
 * of the fact that we had unmerged entries.
 */
static void mark_merge_entries(void)
{
	int i;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (!ce_stage(ce))
			continue;
		ce->ce_flags |= CE_STAGEMASK;
	}
}
/*
 * This gets a mix of an existing index and a tree, one pathname entry
 * at a time. The index entry may be a single stage-0 one, but it could
 * also be multiple unmerged entries (in which case idx_pos/idx_nr will
 * give you the position and number of entries in the index).
 */
static void do_oneway_diff(struct unpack_trees_options *o,
			   struct cache_entry *idx,
			   struct cache_entry *tree)
{
	struct rev_info *revs = o->unpack_data;
	int match_missing, cached;

	/* if the entry is not checked out, don't examine work tree */
	cached = o->index_only ||
		(idx && ((idx->ce_flags & CE_VALID) || ce_skip_worktree(idx)));
	/*
	 * Backward compatibility wart - "diff-index -m" does
	 * not mean "do not ignore merges", but "match_missing".
	 *
	 * But with the revision flag parsing, that's found in
	 * "!revs->ignore_merges".
	 */
	match_missing = !revs->ignore_merges;

	if (cached && idx && ce_stage(idx)) {
		diff_unmerge(&revs->diffopt, idx->name, idx->ce_mode, idx->sha1);
		return;
	}

	/*
	 * Something added to the tree?
	 */
	if (!tree) {
		show_new_file(revs, idx, cached, match_missing);
		return;
	}

	/*
	 * Something removed from the tree?
	 */
	if (!idx) {
		diff_index_show_file(revs, "-", tree, tree->sha1, tree->ce_mode);
		return;
	}

	/* Show difference between old and new */
	show_modified(revs, tree, idx, 1, cached, match_missing);
}
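/*
 * When the index entry is unmerged, several cache entries share the same
 * pathname; advance o->pos past all of them so that the diff callback sees
 * each conflicted path only once.
 */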
static inline void skip_same_name(struct cache_entry *ce, struct unpack_trees_options *o)
{
	int len = ce_namelen(ce);
	const struct index_state *index = o->src_index;

	while (o->pos < index->cache_nr) {
		struct cache_entry *next = index->cache[o->pos];
		if (len != ce_namelen(next))
			break;
		if (memcmp(ce->name, next->name, len))
			break;
		o->pos++;
	}
}
/*
 * The unpack_trees() interface is designed for merging, so
 * the different source entries are designed primarily for
 * the source trees, with the old index being really mainly
 * used for being replaced by the result.
 *
 * For diffing, the index is more important, and we only have a
 * single tree.
 *
 * We're supposed to return how many index entries we want to skip.
 *
 * This wrapper makes it all more readable, and takes care of all
 * the fairly complex unpack_trees() semantic requirements, including
 * the skipping, the path matching, the type conflict cases etc.
 */
static int oneway_diff(struct cache_entry **src, struct unpack_trees_options *o)
{
	struct cache_entry *idx = src[0];
	struct cache_entry *tree = src[1];
	struct rev_info *revs = o->unpack_data;

	if (idx && ce_stage(idx))
		skip_same_name(idx, o);

	/*
	 * Unpack-trees generates a DF/conflict entry if
	 * there was a directory in the index and a tree
	 * in the tree. From a diff standpoint, that's a
	 * delete of the tree and a create of the file.
	 */
	if (tree == o->df_conflict_entry)
		tree = NULL;

	if (ce_path_match(idx ? idx : tree, revs->prune_data))
		do_oneway_diff(o, idx, tree);

	return 0;
}
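/*
 * Compare the tree given as revs->pending.objects[0] against the index
 * (cached) or, through the index, against the work tree.  The walk is
 * driven by unpack_trees() with oneway_diff() as the per-entry callback;
 * the queued results are flushed through revs->diffopt at the end.
 */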
int run_diff_index(struct rev_info *revs, int cached)
{
	struct object *ent;
	struct tree *tree;
	const char *tree_name;
	struct unpack_trees_options opts;
	struct tree_desc t;

	mark_merge_entries();

	ent = revs->pending.objects[0].item;
	tree_name = revs->pending.objects[0].name;
	tree = parse_tree_indirect(ent->sha1);
	if (!tree)
		return error("bad tree object %s", tree_name);

	memset(&opts, 0, sizeof(opts));
	opts.head_idx = 1;
	opts.index_only = cached;
	opts.diff_index_cached = (cached &&
				  !DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER));
	opts.merge = 1;
	opts.fn = oneway_diff;
	opts.unpack_data = revs;
	opts.src_index = &the_index;
	opts.dst_index = NULL;

	init_tree_desc(&t, tree->buffer, tree->size);
	if (unpack_trees(1, &t, &opts))
		exit(128);

	diff_set_mnemonic_prefix(&revs->diffopt, "c/", cached ? "i/" : "w/");
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
	return 0;
}
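/*
 * Used by git-blame: compare the given tree with the index, leaving the
 * resulting filepairs on the diff queue for the caller to post-process
 * instead of flushing them here.  Only opt->paths and the
 * FIND_COPIES_HARDER flag are taken from the caller's diff_options.
 */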
int do_diff_cache(const unsigned char *tree_sha1, struct diff_options *opt)
{
	struct tree *tree;
	struct rev_info revs;
	int i;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;
	struct unpack_trees_options opts;
	struct tree_desc t;

	/*
	 * This is used by git-blame to run diff-cache internally;
	 * it potentially needs to repeatedly run this, so we will
	 * start by removing the higher order entries the last round
	 * left behind.
	 */
	dst = active_cache;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			if (last && !strcmp(ce->name, last->name))
				continue;
			cache_tree_invalidate_path(active_cache_tree,
						   ce->name);
			last = ce;
			ce->ce_flags |= CE_REMOVE;
		}
		*dst++ = ce;
	}
	active_nr = dst - active_cache;

	init_revisions(&revs, NULL);
	revs.prune_data = opt->paths;
	tree = parse_tree_indirect(tree_sha1);
	if (!tree)
		die("bad tree object %s", sha1_to_hex(tree_sha1));

	memset(&opts, 0, sizeof(opts));
	opts.head_idx = 1;
	opts.index_only = 1;
	opts.diff_index_cached = !DIFF_OPT_TST(opt, FIND_COPIES_HARDER);
	opts.merge = 1;
	opts.fn = oneway_diff;
	opts.unpack_data = &revs;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;

	init_tree_desc(&t, tree->buffer, tree->size);
	if (unpack_trees(1, &t, &opts))
		exit(128);
	return 0;
}
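/*
 * Report whether the index differs from the revision named by "def"
 * (typically "HEAD").  A hypothetical caller that only needs a yes/no
 * answer could do:
 *
 *	if (index_differs_from("HEAD", 0))
 *		warning("you have staged changes");
 *
 * QUIET suppresses output and makes the diff machinery record any
 * difference in the HAS_CHANGES flag, which is all this helper looks at.
 */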
int index_differs_from(const char *def, int diff_flags)
{
	struct rev_info rev;

	init_revisions(&rev, NULL);
	setup_revisions(0, NULL, &rev, def);
	DIFF_OPT_SET(&rev.diffopt, QUIET);
	DIFF_OPT_SET(&rev.diffopt, EXIT_WITH_STATUS);
	rev.diffopt.flags |= diff_flags;
	run_diff_index(&rev, 1);
	if (rev.pending.alloc)
		free(rev.pending.objects);
	return (DIFF_OPT_TST(&rev.diffopt, HAS_CHANGES) != 0);
}