/*
 * Copyright (C) 2005 Junio C Hamano
 */
#include "cache.h"
#include "quote.h"
#include "commit.h"
#include "diff.h"
#include "diffcore.h"
#include "revision.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "refs.h"
#include "submodule.h"
/*
 * Has the work tree entity been removed?
 *
 * Return 1 if it was removed from the work tree, 0 if an entity to be
 * compared with the cache entry ce still exists (the latter includes
 * the case where a directory that is not a submodule repository
 * exists for ce that is a submodule -- it is a submodule that is not
 * checked out).  Return negative for an error.
 */
static int check_removed(const struct cache_entry *ce, struct stat *st)
{
	if (lstat(ce->name, st) < 0) {
		if (errno != ENOENT && errno != ENOTDIR)
			return -1;
		return 1;
	}
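	/*
	 * A leading path component turning into a symlink means the
	 * entry can no longer exist at its recorded path, so treat it
	 * as removed from the work tree.
	 */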
	if (has_symlink_leading_path(ce->name, ce_namelen(ce)))
		return 1;
	if (S_ISDIR(st->st_mode)) {
		unsigned char sub[20];

		/*
		 * If ce is already a gitlink, we can have a plain
		 * directory (i.e. the submodule is not checked out),
		 * or a checked out submodule.  Either case this is not
		 * a case where something was removed from the work tree,
		 * so we will return 0.
		 *
		 * Otherwise, if the directory is not a submodule
		 * repository, that means ce which was a blob turned into
		 * a directory --- the blob was removed!
		 */
		if (!S_ISGITLINK(ce->ce_mode) &&
		    resolve_gitlink_ref(ce->name, "HEAD", sub))
			return 1;
	}
	return 0;
}
/*
 * Has a file changed, or does a submodule have new commits or a dirty
 * work tree?
 *
 * Return 1 when changes are detected, 0 otherwise. If the DIRTY_SUBMODULES
 * option is set, the caller does not just want to know whether a submodule
 * is modified at all, but also which conditions are met (new commits,
 * untracked content and/or modified content).
 */
static int match_stat_with_submodule(struct diff_options *diffopt,
				     struct cache_entry *ce, struct stat *st,
				     unsigned ce_option, unsigned *dirty_submodule)
{
	int changed = ce_match_stat(ce, st, ce_option);
	if (S_ISGITLINK(ce->ce_mode)) {
		unsigned orig_flags = diffopt->flags;
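		/*
		 * Per-path submodule configuration may adjust the ignore
		 * flags below, so remember the original flags and restore
		 * them once this entry has been handled.
		 */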
		if (!DIFF_OPT_TST(diffopt, OVERRIDE_SUBMODULE_CONFIG))
			set_diffopt_flags_from_submodule_config(diffopt, ce->name);
		if (DIFF_OPT_TST(diffopt, IGNORE_SUBMODULES))
			changed = 0;
		else if (!DIFF_OPT_TST(diffopt, IGNORE_DIRTY_SUBMODULES)
		    && (!changed || DIFF_OPT_TST(diffopt, DIRTY_SUBMODULES)))
			*dirty_submodule = is_submodule_modified(ce->name, DIFF_OPT_TST(diffopt, IGNORE_UNTRACKED_IN_SUBMODULES));
		diffopt->flags = orig_flags;
	}
	return changed;
}
int run_diff_files(struct rev_info *revs, unsigned int option)
{
	int entries, i;
	int diff_unmerged_stage = revs->max_count;
	int silent_on_removed = option & DIFF_SILENT_ON_REMOVED;
	unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
			      ? CE_MATCH_RACY_IS_DIRTY : 0);

	diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");

	if (diff_unmerged_stage < 0)
		diff_unmerged_stage = 2;
	entries = active_nr;
	for (i = 0; i < entries; i++) {
		struct stat st;
		unsigned int oldmode, newmode;
		struct cache_entry *ce = active_cache[i];
		int changed;
		unsigned dirty_submodule = 0;
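		/*
		 * With QUICK (--quiet) one detected change is enough;
		 * once HAS_CHANGES is set there is no point scanning the
		 * rest of the index.
		 */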
		if (DIFF_OPT_TST(&revs->diffopt, QUICK) &&
		    DIFF_OPT_TST(&revs->diffopt, HAS_CHANGES))
			break;

		if (!ce_path_match(ce, revs->prune_data))
			continue;
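		/*
		 * An unmerged path is represented by several index entries
		 * with stages 1..3; collect them into a combine_diff_path
		 * (allocated with room for five parents) so that stages 2
		 * and 3 can be shown as a combined diff against the work
		 * tree.
		 */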
		if (ce_stage(ce)) {
			struct combine_diff_path *dpath;
			int num_compare_stages = 0;
			size_t path_len;

			path_len = ce_namelen(ce);

			dpath = xmalloc(combine_diff_path_size(5, path_len));
			dpath->path = (char *) &(dpath->parent[5]);

			dpath->next = NULL;
			dpath->len = path_len;
			memcpy(dpath->path, ce->name, path_len);
			dpath->path[path_len] = '\0';
			hashclr(dpath->sha1);
			memset(&(dpath->parent[0]), 0,
			       sizeof(struct combine_diff_parent)*5);

			changed = check_removed(ce, &st);
			if (!changed)
				dpath->mode = ce_mode_from_stat(ce, st.st_mode);
			else {
				if (changed < 0) {
					perror(ce->name);
					continue;
				}
				if (silent_on_removed)
					continue;
			}

			while (i < entries) {
				struct cache_entry *nce = active_cache[i];
				int stage;

				if (strcmp(ce->name, nce->name))
					break;

				/* Stage #2 (ours) is the first parent,
				 * stage #3 (theirs) is the second.
				 */
				stage = ce_stage(nce);
				if (2 <= stage) {
					int mode = nce->ce_mode;
					num_compare_stages++;
					hashcpy(dpath->parent[stage-2].sha1, nce->sha1);
					dpath->parent[stage-2].mode = ce_mode_from_stat(nce, mode);
					dpath->parent[stage-2].status =
						DIFF_STATUS_MODIFIED;
				}

				/* diff against the proper unmerged stage */
				if (stage == diff_unmerged_stage)
					ce = nce;
				i++;
			}
			/*
			 * Compensate for loop update
			 */
			i--;

			if (revs->combine_merges && num_compare_stages == 2) {
				show_combined_diff(dpath, 2,
						   revs->dense_combined_merges,
						   revs);
				free(dpath);
				continue;
			}
			free(dpath);
			dpath = NULL;

			/*
			 * Show the diff for the 'ce' if we found the one
			 * from the desired stage.
			 */
			diff_unmerge(&revs->diffopt, ce->name, 0, null_sha1);
			if (ce_stage(ce) != diff_unmerged_stage)
				continue;
		}

		if (ce_uptodate(ce) || ce_skip_worktree(ce))
			continue;

		/* If CE_VALID is set, don't look at workdir for file removal */
		changed = (ce->ce_flags & CE_VALID) ? 0 : check_removed(ce, &st);
		if (changed) {
			if (changed < 0) {
				perror(ce->name);
				continue;
			}
			if (silent_on_removed)
				continue;
			diff_addremove(&revs->diffopt, '-', ce->ce_mode,
				       ce->sha1, ce->name, 0);
			continue;
		}
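		/*
		 * The path still exists in the work tree; compare the
		 * cached stat data (and, for gitlinks, the submodule
		 * state) against it.
		 */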
		changed = match_stat_with_submodule(&revs->diffopt, ce, &st,
						    ce_option, &dirty_submodule);
		if (!changed && !dirty_submodule) {
			ce_mark_uptodate(ce);
			if (!DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER))
				continue;
		}
		oldmode = ce->ce_mode;
		newmode = ce_mode_from_stat(ce, st.st_mode);
		diff_change(&revs->diffopt, oldmode, newmode,
			    ce->sha1, (changed ? null_sha1 : ce->sha1),
			    ce->name, 0, dirty_submodule);
	}
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
	return 0;
}
/* A file entry went away or appeared */
static void diff_index_show_file(struct rev_info *revs,
				 const char *prefix,
				 struct cache_entry *ce,
				 const unsigned char *sha1, unsigned int mode,
				 unsigned dirty_submodule)
{
	diff_addremove(&revs->diffopt, prefix[0], mode,
		       sha1, ce->name, dirty_submodule);
}
static int get_stat_data(struct cache_entry *ce,
			 const unsigned char **sha1p,
			 unsigned int *modep,
			 int cached, int match_missing,
			 unsigned *dirty_submodule, struct diff_options *diffopt)
{
	const unsigned char *sha1 = ce->sha1;
	unsigned int mode = ce->ce_mode;
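	/*
	 * Unless only the cached (index) contents were asked for, consult
	 * the work tree: a missing path either counts as a match or is an
	 * error, and a modified path is reported with the null SHA-1 and
	 * the mode taken from the work tree.
	 */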
	if (!cached && !ce_uptodate(ce)) {
		int changed;
		struct stat st;
		changed = check_removed(ce, &st);
		if (changed < 0)
			return -1;
		else if (changed) {
			if (match_missing) {
				*sha1p = sha1;
				*modep = mode;
				return 0;
			}
			return -1;
		}
		changed = match_stat_with_submodule(diffopt, ce, &st,
						    0, dirty_submodule);
		if (changed) {
			mode = ce_mode_from_stat(ce, st.st_mode);
			sha1 = null_sha1;
		}
	}

	*sha1p = sha1;
	*modep = mode;
	return 0;
}
static void show_new_file(struct rev_info *revs,
			  struct cache_entry *new,
			  int cached, int match_missing)
{
	const unsigned char *sha1;
	unsigned int mode;
	unsigned dirty_submodule = 0;

	/*
	 * New file in the index: it might actually be different in
	 * the working tree.
	 */
	if (get_stat_data(new, &sha1, &mode, cached, match_missing,
	    &dirty_submodule, &revs->diffopt) < 0)
		return;

	diff_index_show_file(revs, "+", new, sha1, mode, dirty_submodule);
}
static int show_modified(struct rev_info *revs,
			 struct cache_entry *old,
			 struct cache_entry *new,
			 int report_missing,
			 int cached, int match_missing)
{
	unsigned int mode, oldmode;
	const unsigned char *sha1;
	unsigned dirty_submodule = 0;

	if (get_stat_data(new, &sha1, &mode, cached, match_missing,
			  &dirty_submodule, &revs->diffopt) < 0) {
		if (report_missing)
			diff_index_show_file(revs, "-", old,
					     old->sha1, old->ce_mode, 0);
		return -1;
	}
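	/*
	 * When combined diffs are requested and the work tree is being
	 * compared, a path whose work tree, index and tree contents do not
	 * all agree is shown as a two-parent combined diff, with the index
	 * and tree versions as the parents.
	 */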
	if (revs->combine_merges && !cached &&
	    (hashcmp(sha1, old->sha1) || hashcmp(old->sha1, new->sha1))) {
		struct combine_diff_path *p;
		int pathlen = ce_namelen(new);

		p = xmalloc(combine_diff_path_size(2, pathlen));
		p->path = (char *) &p->parent[2];
		p->next = NULL;
		p->len = pathlen;
		memcpy(p->path, new->name, pathlen);
		p->path[pathlen] = 0;
		p->mode = mode;
		hashclr(p->sha1);
		memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
		p->parent[0].status = DIFF_STATUS_MODIFIED;
		p->parent[0].mode = new->ce_mode;
		hashcpy(p->parent[0].sha1, new->sha1);
		p->parent[1].status = DIFF_STATUS_MODIFIED;
		p->parent[1].mode = old->ce_mode;
		hashcpy(p->parent[1].sha1, old->sha1);
		show_combined_diff(p, 2, revs->dense_combined_merges, revs);
		free(p);
		return 0;
	}

	oldmode = old->ce_mode;
	if (mode == oldmode && !hashcmp(sha1, old->sha1) && !dirty_submodule &&
	    !DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER))
		return 0;

	diff_change(&revs->diffopt, oldmode, mode,
		    old->sha1, sha1, old->name, 0, dirty_submodule);
	return 0;
}
/*
 * This gets a mix of an existing index and a tree, one pathname entry
 * at a time. The index entry may be a single stage-0 one, but it could
 * also be multiple unmerged entries (in which case idx_pos/idx_nr will
 * give you the position and number of entries in the index).
 */
static void do_oneway_diff(struct unpack_trees_options *o,
			   struct cache_entry *idx,
			   struct cache_entry *tree)
{
	struct rev_info *revs = o->unpack_data;
	int match_missing, cached;

	/* if the entry is not checked out, don't examine work tree */
	cached = o->index_only ||
		(idx && ((idx->ce_flags & CE_VALID) || ce_skip_worktree(idx)));
	/*
	 * Backward compatibility wart - "diff-index -m" does
	 * not mean "do not ignore merges", but "match_missing".
	 *
	 * But with the revision flag parsing, that's found in
	 * "!revs->ignore_merges".
	 */
	match_missing = !revs->ignore_merges;

	if (cached && idx && ce_stage(idx)) {
		diff_unmerge(&revs->diffopt, idx->name, idx->ce_mode,
			     idx->sha1);
		return;
	}

	/*
	 * Something added to the tree?
	 */
	if (!tree) {
		show_new_file(revs, idx, cached, match_missing);
		return;
	}

	/*
	 * Something removed from the tree?
	 */
	if (!idx) {
		diff_index_show_file(revs, "-", tree, tree->sha1, tree->ce_mode, 0);
		return;
	}

	/* Show difference between old and new */
	show_modified(revs, tree, idx, 1, cached, match_missing);
}
/*
 * The unpack_trees() interface is designed for merging, so
 * the different source entries are designed primarily for
 * the source trees, with the old index being really mainly
 * used for being replaced by the result.
 *
 * For diffing, the index is more important, and we only have a
 * single tree.
 *
 * We're supposed to advance o->pos to skip what we have already processed.
 *
 * This wrapper makes it all more readable, and takes care of all
 * the fairly complex unpack_trees() semantic requirements, including
 * the skipping, the path matching, the type conflict cases etc.
 */
static int oneway_diff(struct cache_entry **src, struct unpack_trees_options *o)
{
	struct cache_entry *idx = src[0];
	struct cache_entry *tree = src[1];
	struct rev_info *revs = o->unpack_data;

	/*
	 * Unpack-trees generates a DF/conflict entry if
	 * there was a directory in the index and a tree
	 * in the tree. From a diff standpoint, that's a
	 * delete of the tree and a create of the file.
	 */
	if (tree == o->df_conflict_entry)
		tree = NULL;

	if (ce_path_match(idx ? idx : tree, revs->prune_data))
		do_oneway_diff(o, idx, tree);

	return 0;
}
int run_diff_index(struct rev_info *revs, int cached)
{
	struct object *ent;
	struct tree *tree;
	const char *tree_name;
	struct unpack_trees_options opts;
	struct tree_desc t;

	ent = revs->pending.objects[0].item;
	tree_name = revs->pending.objects[0].name;
	tree = parse_tree_indirect(ent->sha1);
	if (!tree)
		return error("bad tree object %s", tree_name);
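	/*
	 * Walk the tree and the index in parallel through unpack_trees():
	 * oneway_diff() is called with the index entry and the tree entry
	 * for each path and emits the diff; no result is written back to
	 * the index (dst_index is NULL).
	 */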
	memset(&opts, 0, sizeof(opts));
	opts.head_idx = 1;
	opts.index_only = cached;
	opts.diff_index_cached = (cached &&
				  !DIFF_OPT_TST(&revs->diffopt, FIND_COPIES_HARDER));
	opts.merge = 1;
	opts.fn = oneway_diff;
	opts.unpack_data = revs;
	opts.src_index = &the_index;
	opts.dst_index = NULL;

	init_tree_desc(&t, tree->buffer, tree->size);
	if (unpack_trees(1, &t, &opts))
		exit(128);

	diff_set_mnemonic_prefix(&revs->diffopt, "c/", cached ? "i/" : "w/");
	diffcore_fix_diff_index(&revs->diffopt);
	diffcore_std(&revs->diffopt);
	diff_flush(&revs->diffopt);
	return 0;
}
int do_diff_cache(const unsigned char *tree_sha1, struct diff_options *opt)
{
	struct tree *tree;
	struct rev_info revs;
	int i;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;
	struct unpack_trees_options opts;
	struct tree_desc t;

	/*
	 * This is used by git-blame to run diff-cache internally;
	 * it potentially needs to repeatedly run this, so we will
	 * start by removing the higher order entries the last round
	 * left behind.
	 */
	dst = active_cache;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			if (last && !strcmp(ce->name, last->name))
				continue;
			cache_tree_invalidate_path(active_cache_tree,
						   ce->name);
			last = ce;
			ce->ce_flags |= CE_REMOVE;
		}
		*dst++ = ce;
	}
	active_nr = dst - active_cache;

	init_revisions(&revs, NULL);
	revs.prune_data = opt->paths;
	tree = parse_tree_indirect(tree_sha1);
	if (!tree)
		die("bad tree object %s", sha1_to_hex(tree_sha1));
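	/*
	 * Same one-way walk as in run_diff_index(), but always index-only:
	 * do_diff_cache() never looks at the work tree.
	 */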
	memset(&opts, 0, sizeof(opts));
	opts.head_idx = 1;
	opts.index_only = 1;
	opts.diff_index_cached = !DIFF_OPT_TST(opt, FIND_COPIES_HARDER);
	opts.merge = 1;
	opts.fn = oneway_diff;
	opts.unpack_data = &revs;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;

	init_tree_desc(&t, tree->buffer, tree->size);
	if (unpack_trees(1, &t, &opts))
		exit(128);
	return 0;
}
int index_differs_from(const char *def, int diff_flags)
{
	struct rev_info rev;
	struct setup_revision_opt opt;

	init_revisions(&rev, NULL);
	memset(&opt, 0, sizeof(opt));
	opt.def = def;
	setup_revisions(0, NULL, &rev, &opt);
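	/*
	 * QUICK and EXIT_WITH_STATUS make the diff machinery record,
	 * rather than print, whether anything differs; the answer is read
	 * back from the HAS_CHANGES flag after run_diff_index().
	 */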
	DIFF_OPT_SET(&rev.diffopt, QUICK);
	DIFF_OPT_SET(&rev.diffopt, EXIT_WITH_STATUS);
	rev.diffopt.flags |= diff_flags;
	run_diff_index(&rev, 1);
	if (rev.pending.alloc)
		free(rev.pending.objects);
	return (DIFF_OPT_TST(&rev.diffopt, HAS_CHANGES) != 0);
}