#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and is in fact encouraged to reword them to better suit its particular
 * situation.  See how "git checkout" and "git merge" replace
 * them using setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char *msg;
	const char **msgs = opts->msgs;
	const char *cmd2 = strcmp(cmd, "checkout") ? cmd : "switch branches";

	if (advice_commit_before_merge)
		msg = "Your local changes to the following files would be overwritten by %s:\n%%s"
			"Please, commit your changes or stash them before you can %s.";
	else
		msg = "Your local changes to the following files would be overwritten by %s:\n%%s";
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		xstrfmt(msg, cmd, cmd2);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		"Updating the following directories would lose untracked files in it:\n%s";

	if (advice_commit_before_merge)
		msg = "The following untracked working tree files would be %s by %s:\n%%s"
			"Please move or remove them before you can %s.";
	else
		msg = "The following untracked working tree files would be %s by %s:\n%%s";

	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, "removed", cmd, cmd2);
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, "overwritten", cmd, cmd2);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = "Entry '%s' overlaps with '%s'. Cannot bind.";

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		"Cannot update sparse checkout: the following entries are not up-to-date:\n%s";
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		"The following working tree files would be overwritten by sparse checkout update:\n%s";
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		"The following working tree files would be removed by sparse checkout update:\n%s";

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}
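/*
 * Illustrative sketch (not a call site from this file): a porcelain
 * command that is about to switch branches would typically do
 * something like
 *
 *	struct unpack_trees_options opts;
 *	memset(&opts, 0, sizeof(opts));
 *	setup_unpack_trees_porcelain(&opts, "checkout");
 *
 * so that the plumbing messages above are replaced by the friendlier
 * ones built here.  The "checkout" argument is only an example.
 */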
static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
	unsigned int size = ce_size(ce);
	struct cache_entry *new = xmalloc(size);

	memcpy(new, ce, size);
	return new;
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_entry(ce), set, clear);
}
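/*
 * Note: add_entry() always works on a copy made by dup_entry(), so cache
 * entries handed to the merge functions (which may still live in
 * o->src_index) are never spliced into o->result directly.
 */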
/*
 * Add an error message for path <path> corresponding to the error type
 * <e>, either reporting it right away or queueing it for later display
 * by display_error_msgs() when all errors are to be shown.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), path);

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}
/*
 * Display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), path.buf);
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, "Aborting\n");
}
/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct checkout state;
static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	int i;
	int errs = 0;

	if (o->update && o->verbose_update) {
		for (total = cnt = 0; cnt < index->cache_nr; cnt++) {
			const struct cache_entry *ce = index->cache[cnt];
			if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
				total++;
		}
		progress = start_progress_delay(_("Checking out files"),
						total, 50, 1);
		cnt = 0;
	}

	git_attr_set_direction(GIT_ATTR_CHECKOUT, &o->result);
	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(&o->result);
	remove_scheduled_dirs();

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				die("BUG: both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}
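/*
 * check_updates() works in two passes over o->result: first every entry
 * flagged CE_WT_REMOVE is deleted from the working tree, then every entry
 * flagged CE_UPDATE is checked out, with progress output when
 * o->verbose_update is set.
 */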
static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);
static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify worktree if they are already
	 * outside checkout area
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must be called already;
		 * also stat info may have been lost after merged_entry(), so calling
		 * verify_uptodate() again may fail
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}
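/*
 * In short, apply_sparse_checkout() handles the four skip-worktree
 * transitions for one entry:
 *
 *   stays outside the checkout area : drop CE_UPDATE/CE_WT_REMOVE
 *   newly outside (narrowing)       : set CE_WT_REMOVE, after verifying
 *                                     the file is up to date
 *   newly inside (widening)         : set CE_UPDATE, after verifying
 *                                     nothing untracked is overwritten
 *   stays inside                    : nothing to do
 */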
static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}
/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback. Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}
static int find_cache_pos(struct traverse_info *, const struct name_entry *);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}
static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.name = *p;
	newinfo.pathspec = info->pathspec;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	for (i = 0; i < n; i++, dirmask >>= 1) {
		const unsigned char *sha1 = NULL;
		if (dirmask & 1)
			sha1 = names[i].sha1;
		buf[i] = fill_tree_descriptor(t+i, sha1);
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < n; i++)
		free(buf[i]);

	return ret;
}
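/*
 * traverse_trees_recursive() descends one directory level: it fills a
 * tree_desc for each of the n trees (an empty descriptor when a tree
 * lacks that directory), recurses via traverse_trees(), and restores
 * o->cache_bottom afterwards so index lookahead stays consistent.
 */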
/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so. But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, n);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}
static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	hashcpy(ce->sha1, n->sha1);
	make_traverse_path(ce->name, info, n);

	return ce;
}
static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}

		if (!(mask & bit))
			continue;

		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				free(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}
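/*
 * The src[] array passed around here has a fixed layout: when o->merge
 * is set, src[0] is the index entry (if any) and src[1..n] are the
 * entries created from the n trees; the merge function then sees the
 * whole tuple at once.
 */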
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}
/*
 * The tree traversal is looking at name p.  If we have a matching entry,
 * return it.  If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * it.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	int pos = find_cache_pos(info, p);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];
	else
		return NULL;
}
static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}
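/*
 * unpack_callback() is the workhorse invoked by traverse_trees() for
 * every pathname: it lines up the matching index entries, lets
 * unpack_nondirectories() feed the non-directory entries to o->fn, and
 * recurses into subdirectories itself.
 */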
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval);

/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int ret = is_excluded_from_list(prefix->buf, prefix->len,
					basename, &dtype, el);
	int rc;

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (ret < 0)
		ret = defval;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	/*
	 * TODO: check el, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	rc = clear_ce_flags_1(cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      el, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}
/*
 * Traverse the index, find every entry that matches according to
 * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}
static int clear_ce_flags(struct cache_entry **cache, int nr,
			  int select_mask, int clear_mask,
			  struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, -1);
}
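/*
 * A hedged usage sketch (hypothetical patterns, not from this file):
 * with an exclude_list built from sparse-checkout lines such as "/src/",
 * a call like clear_ce_flags(cache, nr, 0, CE_NEW_SKIP_WORKTREE, el)
 * walks the index once and clears CE_NEW_SKIP_WORKTREE on every entry
 * the patterns match, which is how mark_new_skip_worktree() below widens
 * the checkout area.
 */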
/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}
static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
	memset(&state, 0, sizeof(state));
	state.base_dir = "";
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = &o->result;

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, 0) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have had
			 * correct CE_NEW_SKIP_WORKTREE
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;
		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}
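/*
 * A minimal sketch of a caller, assuming the usual read-tree style setup
 * (the field names are real; the surrounding code and tree_sha1 are
 * illustrative only):
 *
 *	struct unpack_trees_options opts;
 *	struct tree_desc desc;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.head_idx = 1;
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	opts.fn = oneway_merge;
 *	opts.merge = 1;
 *	opts.update = 1;
 *	fill_tree_descriptor(&desc, tree_sha1);
 *	if (unpack_trees(1, &desc, &opts))
 *		die("unpack failed");
 */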
/* Here come the merge functions */

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       !hashcmp(a->sha1, b->sha1);
}
/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again
	 * if this entry is truly up-to-date because this file may be
	 * overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);
		if (!changed)
			return 0;
		/*
		 * NEEDSWORK: the current default policy is to allow
		 * submodule to be out of sync wrt the superproject
		 * index. This needs to be tightened later for
		 * submodules that are marked to be automatically
		 * checked out.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;
		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		add_rejected_path(o, error_type, ce->name);
}

static int verify_uptodate(const struct cache_entry *ce,
			   struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
}
static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	if (!ce)
		return;
	cache_tree_invalidate_path(o->src_index, ce->name);
	untracked_cache_invalidate_path(o->src_index, ce->name);
}
/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(const struct cache_entry *ce,
				  enum unpack_trees_error_types error_type,
				  struct unpack_trees_options *o)
{
	return 0;
}

static int verify_clean_subdirectory(const struct cache_entry *ce,
				     enum unpack_trees_error_types error_type,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;
	unsigned char sha1[20];

	if (S_ISGITLINK(ce->ce_mode) &&
	    resolve_gitlink_ref(ce->name, "HEAD", sha1) == 0) {
		/* If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!hashcmp(sha1, ce->sha1))
			return 0;
		return verify_clean_submodule(ce, error_type, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, pathbuf, namelen+1, NULL);
	if (i)
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
	free(pathbuf);
	return cnt;
}
/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}
static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, error_type, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return o->gently ? -1 :
		add_rejected_path(o, error_type, name);
}
/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error("cannot stat '%s': %s", path,
				    strerror(errno));
		else
			ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
						 &st, error_type, o);
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error("cannot stat '%s': %s", ce->name,
				     strerror(errno));
		return 0;
	} else {
		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
}
static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_entry(ce);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() be called again, this time with
		 *    correct CE_NEW_SKIP_WORKTREE
		 *
		 * verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			free(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);
	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				free(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}
static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}
#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head))
				head_match = i;
			if (same(stages[i], remote))
				remote_match = i;
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);
	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}
/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}
/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP), a->name, old->name);
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}
/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}
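/*
 * oneway_merge() is the policy used when a single tree simply replaces
 * the index (for example one-tree "git read-tree" reads): the new tree
 * wins unconditionally, but when the incoming entry is identical to the
 * current one the cached stat information is carried over (optionally
 * re-validated with lstat() under --reset) so the working tree is not
 * touched needlessly.
 */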