1 #define NO_THE_INDEX_COMPATIBILITY_MACROS
6 #include "cache-tree.h"
7 #include "unpack-trees.h"
13 * Error messages expected by scripts out of plumbing commands such as
14 * read-tree. Non-scripted Porcelain is not required to use these messages
15 * and in fact is encouraged to reword them to better suit its particular
16 * situation. See how "git checkout" replaces not_uptodate_file to
17 * explain why it does not allow switching between branches when you have
18 * local changes, for example.
20 static struct unpack_trees_error_msgs unpack_plumbing_errors
= {
22 "Entry '%s' would be overwritten by merge. Cannot merge.",
24 /* not_uptodate_file */
25 "Entry '%s' not uptodate. Cannot merge.",
27 /* not_uptodate_dir */
28 "Updating '%s' would lose untracked files in it",
30 /* would_lose_untracked */
31 "Untracked working tree file '%s' would be %s by merge.",
34 "Entry '%s' overlaps with '%s'. Cannot bind.",
/*
 * Pick the caller-supplied message for field 'fld' when one was set in
 * o->msgs, otherwise fall back to the plumbing default above.  The
 * listing had lost the '?' branch of the conditional; restored here.
 */
#define ERRORMSG(o,fld) \
	( ((o) && (o)->msgs.fld) \
	? ((o)->msgs.fld) \
	: (unpack_plumbing_errors.fld) )
42 static void add_entry(struct unpack_trees_options
*o
, struct cache_entry
*ce
,
43 unsigned int set
, unsigned int clear
)
45 unsigned int size
= ce_size(ce
);
46 struct cache_entry
*new = xmalloc(size
);
48 clear
|= CE_HASHED
| CE_UNHASHED
;
50 memcpy(new, ce
, size
);
52 new->ce_flags
= (new->ce_flags
& ~clear
) | set
;
53 add_index_entry(&o
->result
, new, ADD_CACHE_OK_TO_ADD
|ADD_CACHE_OK_TO_REPLACE
);
57 * Unlink the last component and schedule the leading directories for
58 * removal, such that empty directories get removed.
60 static void unlink_entry(struct cache_entry
*ce
)
62 if (has_symlink_or_noent_leading_path(ce
->name
, ce_namelen(ce
)))
64 if (unlink_or_warn(ce
->name
))
66 schedule_dir_for_removal(ce
->name
, ce_namelen(ce
));
/*
 * NOTE(review): this listing is garbled — statements are wrapped mid-line
 * and several original lines (braces, "if (o->update)" guards, counters,
 * returns) were dropped; comments below are hedged accordingly.
 */
/* Shared checkout state used when writing entries to the work tree. */
69 static struct checkout state
;
/*
 * check_updates(): walk o->result and apply CE_REMOVE/CE_UPDATE marks —
 * presumably unlinking removed entries, then checking out updated ones
 * via checkout_entry() — with optional progress reporting when
 * o->update && o->verbose_update.  Appears to return an error indicator
 * accumulated in 'errs' — TODO confirm against upstream.
 */
70 static int check_updates(struct unpack_trees_options
*o
)
72 unsigned cnt
= 0, total
= 0;
73 struct progress
*progress
= NULL
;
74 struct index_state
*index
= &o
->result
;
/* First pass: count the entries that will be touched, for the progress bar. */
78 if (o
->update
&& o
->verbose_update
) {
79 for (total
= cnt
= 0; cnt
< index
->cache_nr
; cnt
++) {
80 struct cache_entry
*ce
= index
->cache
[cnt
];
81 if (ce
->ce_flags
& (CE_UPDATE
| CE_REMOVE
))
85 progress
= start_progress_delay("Checking out files",
/* Route attribute lookups at the checkout destination while writing files. */
91 git_attr_set_direction(GIT_ATTR_CHECKOUT
, &o
->result
);
/* Removal pass: handle CE_REMOVE entries before any updates. */
92 for (i
= 0; i
< index
->cache_nr
; i
++) {
93 struct cache_entry
*ce
= index
->cache
[i
];
95 if (ce
->ce_flags
& CE_REMOVE
) {
96 display_progress(progress
, ++cnt
);
101 remove_marked_cache_entries(&o
->result
);
102 remove_scheduled_dirs();
/* Update pass: write out CE_UPDATE entries and clear the flag. */
104 for (i
= 0; i
< index
->cache_nr
; i
++) {
105 struct cache_entry
*ce
= index
->cache
[i
];
107 if (ce
->ce_flags
& CE_UPDATE
) {
108 display_progress(progress
, ++cnt
);
109 ce
->ce_flags
&= ~CE_UPDATE
;
111 errs
|= checkout_entry(ce
, &state
, NULL
);
115 stop_progress(&progress
);
117 git_attr_set_direction(GIT_ATTR_CHECKIN
, NULL
);
121 static inline int call_unpack_fn(struct cache_entry
**src
, struct unpack_trees_options
*o
)
123 int ret
= o
->fn(src
, o
);
129 static void mark_ce_used(struct cache_entry
*ce
, struct unpack_trees_options
*o
)
131 ce
->ce_flags
|= CE_UNPACKED
;
133 if (o
->cache_bottom
< o
->src_index
->cache_nr
&&
134 o
->src_index
->cache
[o
->cache_bottom
] == ce
) {
135 int bottom
= o
->cache_bottom
;
136 while (bottom
< o
->src_index
->cache_nr
&&
137 o
->src_index
->cache
[bottom
]->ce_flags
& CE_UNPACKED
)
139 o
->cache_bottom
= bottom
;
143 static void mark_all_ce_unused(struct index_state
*index
)
146 for (i
= 0; i
< index
->cache_nr
; i
++)
147 index
->cache
[i
]->ce_flags
&= ~CE_UNPACKED
;
150 static int locate_in_src_index(struct cache_entry
*ce
,
151 struct unpack_trees_options
*o
)
153 struct index_state
*index
= o
->src_index
;
154 int len
= ce_namelen(ce
);
155 int pos
= index_name_pos(index
, ce
->name
, len
);
162 * We call unpack_index_entry() with an unmerged cache entry
163 * only in diff-index, and it wants a single callback. Skip
164 * the other unmerged entry with the same name.
166 static void mark_ce_used_same_name(struct cache_entry
*ce
,
167 struct unpack_trees_options
*o
)
169 struct index_state
*index
= o
->src_index
;
170 int len
= ce_namelen(ce
);
173 for (pos
= locate_in_src_index(ce
, o
); pos
< index
->cache_nr
; pos
++) {
174 struct cache_entry
*next
= index
->cache
[pos
];
175 if (len
!= ce_namelen(next
) ||
176 memcmp(ce
->name
, next
->name
, len
))
178 mark_ce_used(next
, o
);
182 static struct cache_entry
*next_cache_entry(struct unpack_trees_options
*o
)
184 const struct index_state
*index
= o
->src_index
;
185 int pos
= o
->cache_bottom
;
187 while (pos
< index
->cache_nr
) {
188 struct cache_entry
*ce
= index
->cache
[pos
];
189 if (!(ce
->ce_flags
& CE_UNPACKED
))
196 static void add_same_unmerged(struct cache_entry
*ce
,
197 struct unpack_trees_options
*o
)
199 struct index_state
*index
= o
->src_index
;
200 int len
= ce_namelen(ce
);
201 int pos
= index_name_pos(index
, ce
->name
, len
);
204 die("programming error in a caller of mark_ce_used_same_name");
205 for (pos
= -pos
- 1; pos
< index
->cache_nr
; pos
++) {
206 struct cache_entry
*next
= index
->cache
[pos
];
207 if (len
!= ce_namelen(next
) ||
208 memcmp(ce
->name
, next
->name
, len
))
210 add_entry(o
, next
, 0, 0);
211 mark_ce_used(next
, o
);
215 static int unpack_index_entry(struct cache_entry
*ce
,
216 struct unpack_trees_options
*o
)
218 struct cache_entry
*src
[5] = { ce
, NULL
, };
223 if (o
->skip_unmerged
) {
224 add_entry(o
, ce
, 0, 0);
228 ret
= call_unpack_fn(src
, o
);
230 mark_ce_used_same_name(ce
, o
);
234 static int find_cache_pos(struct traverse_info
*, const struct name_entry
*);
236 static void restore_cache_bottom(struct traverse_info
*info
, int bottom
)
238 struct unpack_trees_options
*o
= info
->data
;
240 if (o
->diff_index_cached
)
242 o
->cache_bottom
= bottom
;
245 static int switch_cache_bottom(struct traverse_info
*info
)
247 struct unpack_trees_options
*o
= info
->data
;
250 if (o
->diff_index_cached
)
252 ret
= o
->cache_bottom
;
253 pos
= find_cache_pos(info
->prev
, &info
->name
);
256 o
->cache_bottom
= -2 - pos
;
258 o
->cache_bottom
= o
->src_index
->cache_nr
;
/*
 * NOTE(review): garbled listing — the newinfo setup lines (copying *info,
 * linking prev/name) are missing here; comments are hedged accordingly.
 * Descend into a subdirectory present in 'dirmask': build tree
 * descriptors for each of the n trees (NULL sha1 where the tree lacks
 * the entry), adjust cache_bottom for the subtree, recurse via
 * traverse_trees(), then restore the bottom.
 */
262 static int traverse_trees_recursive(int n
, unsigned long dirmask
, unsigned long df_conflicts
, struct name_entry
*names
, struct traverse_info
*info
)
265 struct tree_desc t
[MAX_UNPACK_TREES
];
266 struct traverse_info newinfo
;
267 struct name_entry
*p
;
276 newinfo
.pathlen
+= tree_entry_len(p
->path
, p
->sha1
) + 1;
277 newinfo
.conflicts
|= df_conflicts
;
/* Fill one descriptor per tree; dirmask bit i says tree i has this dir. */
279 for (i
= 0; i
< n
; i
++, dirmask
>>= 1) {
280 const unsigned char *sha1
= NULL
;
282 sha1
= names
[i
].sha1
;
283 fill_tree_descriptor(t
+i
, sha1
);
286 bottom
= switch_cache_bottom(&newinfo
);
287 ret
= traverse_trees(n
, t
, &newinfo
);
288 restore_cache_bottom(&newinfo
, bottom
);
293 * Compare the traverse-path to the cache entry without actually
294 * having to generate the textual representation of the traverse
297 * NOTE! This *only* compares up to the size of the traverse path
298 * itself - the caller needs to do the final check for the cache
299 * entry having more data at the end!
301 static int do_compare_entry(const struct cache_entry
*ce
, const struct traverse_info
*info
, const struct name_entry
*n
)
303 int len
, pathlen
, ce_len
;
307 int cmp
= do_compare_entry(ce
, info
->prev
, &info
->name
);
311 pathlen
= info
->pathlen
;
312 ce_len
= ce_namelen(ce
);
314 /* If ce_len < pathlen then we must have previously hit "name == directory" entry */
315 if (ce_len
< pathlen
)
319 ce_name
= ce
->name
+ pathlen
;
321 len
= tree_entry_len(n
->path
, n
->sha1
);
322 return df_name_compare(ce_name
, ce_len
, S_IFREG
, n
->path
, len
, n
->mode
);
/*
 * Full comparison of a cache entry against a tree-walk name: the prefix
 * comparison from do_compare_entry(), then a length tie-break.
 */
static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}
338 static int ce_in_traverse_path(const struct cache_entry
*ce
,
339 const struct traverse_info
*info
)
343 if (do_compare_entry(ce
, info
->prev
, &info
->name
))
346 * If ce (blob) is the same name as the path (which is a tree
347 * we will be descending into), it won't be inside it.
349 return (info
->pathlen
< ce_namelen(ce
));
352 static struct cache_entry
*create_ce_entry(const struct traverse_info
*info
, const struct name_entry
*n
, int stage
)
354 int len
= traverse_path_len(info
, n
);
355 struct cache_entry
*ce
= xcalloc(1, cache_entry_size(len
));
357 ce
->ce_mode
= create_ce_mode(n
->mode
);
358 ce
->ce_flags
= create_ce_flags(len
, stage
);
359 hashcpy(ce
->sha1
, n
->sha1
);
360 make_traverse_path(ce
->name
, info
, n
);
/*
 * NOTE(review): garbled listing — the stage-selection assignments and
 * several braces/returns are missing; comments are hedged accordingly.
 * For the non-directory entries at this traversal position, populate
 * src[] with one cache entry per tree (o->df_conflict_entry where a D/F
 * conflict bit is set), then either hand src[] to the merge callback
 * (when o->merge) or add every created entry to o->result directly.
 */
365 static int unpack_nondirectories(int n
, unsigned long mask
,
366 unsigned long dirmask
,
367 struct cache_entry
**src
,
368 const struct name_entry
*names
,
369 const struct traverse_info
*info
)
372 struct unpack_trees_options
*o
= info
->data
;
373 unsigned long conflicts
;
375 /* Do we have *only* directories? Nothing to do */
376 if (mask
== dirmask
&& !src
[0])
379 conflicts
= info
->conflicts
;
382 conflicts
|= dirmask
;
385 * Ok, we've filled in up to any potential index entry in src[0],
388 for (i
= 0; i
< n
; i
++) {
390 unsigned int bit
= 1ul << i
;
391 if (conflicts
& bit
) {
392 src
[i
+ o
->merge
] = o
->df_conflict_entry
;
/* 'stage' is presumably derived from head_idx here — lines missing. */
399 else if (i
+ 1 < o
->head_idx
)
401 else if (i
+ 1 > o
->head_idx
)
405 src
[i
+ o
->merge
] = create_ce_entry(info
, names
+ i
, stage
);
409 return call_unpack_fn(src
, o
);
411 for (i
= 0; i
< n
; i
++)
412 if (src
[i
] && src
[i
] != o
->df_conflict_entry
)
413 add_entry(o
, src
[i
], 0, 0);
417 static int unpack_failed(struct unpack_trees_options
*o
, const char *message
)
419 discard_index(&o
->result
);
422 return error("%s", message
);
/* NEEDSWORK: give this a better name and share with tree-walk.c */
/*
 * memcmp-style comparison of two counted strings: compare the common
 * prefix first, and only fall back to the length difference on a tie.
 * The visible fragment computed 'cmp' but never used it, which would
 * make "abc" and "abd" compare equal; the tie-break is restored here.
 */
static int name_compare(const char *a, int a_len,
			const char *b, int b_len)
{
	int len = (a_len < b_len) ? a_len : b_len;
	int cmp = memcmp(a, b, len);
	if (cmp)
		return cmp;
	return (a_len - b_len);
}
/*
 * The tree traversal is looking at name p. If we have a matching entry,
 * return it. If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * it.
 */
/*
 * NOTE(review): garbled listing — several comparisons, declarations and
 * the final "not found" return are missing; comments hedged accordingly.
 * Encoding appears to be: >= 0 exact entry position, -2 - pos when the
 * name is a directory holding entry 'pos', and presumably -1 when there
 * is no match at all — TODO confirm against upstream.
 */
445 static int find_cache_pos(struct traverse_info
*info
,
446 const struct name_entry
*p
)
449 struct unpack_trees_options
*o
= info
->data
;
450 struct index_state
*index
= o
->src_index
;
451 int pfxlen
= info
->pathlen
;
452 int p_len
= tree_entry_len(p
->path
, p
->sha1
);
454 for (pos
= o
->cache_bottom
; pos
< index
->cache_nr
; pos
++) {
455 struct cache_entry
*ce
= index
->cache
[pos
];
456 const char *ce_name
, *ce_slash
;
459 if (!ce_in_traverse_path(ce
, info
))
461 if (ce
->ce_flags
& CE_UNPACKED
)
463 ce_name
= ce
->name
+ pfxlen
;
464 ce_slash
= strchr(ce_name
, '/');
466 ce_len
= ce_slash
- ce_name
;
468 ce_len
= ce_namelen(ce
) - pfxlen
;
469 cmp
= name_compare(p
->path
, p_len
, ce_name
, ce_len
);
471 * Exact match; if we have a directory we need to
472 * delay returning it.
475 return ce_slash
? -2 - pos
: pos
;
477 continue; /* keep looking */
479 * ce_name sorts after p->path; could it be that we
480 * have files under p->path directory in the index?
481 * E.g. ce_name == "t-i", and p->path == "t"; we may
482 * have "t/a" in the index.
484 if (p_len
< ce_len
&& !memcmp(ce_name
, p
->path
, p_len
) &&
485 ce_name
[p_len
] < '/')
486 continue; /* keep looking */
492 static struct cache_entry
*find_cache_entry(struct traverse_info
*info
,
493 const struct name_entry
*p
)
495 int pos
= find_cache_pos(info
, p
);
496 struct unpack_trees_options
*o
= info
->data
;
499 return o
->src_index
->cache
[pos
];
/*
 * NOTE(review): garbled listing — many control-flow lines (braces,
 * "while" around the index-entry loop, mask updates, return statements,
 * the tail of the traverse_trees_recursive() call) are missing; comments
 * are hedged accordingly.
 * Callback invoked by traverse_trees() for each name across the n trees:
 * interleave matching entries from o->src_index, unpack non-directories,
 * then recurse into directories (with a cache-tree fast path for
 * "diff-index --cached").
 */
504 static int unpack_callback(int n
, unsigned long mask
, unsigned long dirmask
, struct name_entry
*names
, struct traverse_info
*info
)
506 struct cache_entry
*src
[MAX_UNPACK_TREES
+ 1] = { NULL
, };
507 struct unpack_trees_options
*o
= info
->data
;
508 const struct name_entry
*p
= names
;
510 /* Find first entry with a real name (we could use "mask" too) */
514 /* Are we supposed to look at the index too? */
518 struct cache_entry
*ce
;
520 if (o
->diff_index_cached
)
521 ce
= next_cache_entry(o
)
;
523 ce
= find_cache_entry(info
, p
);
527 cmp
= compare_entry(ce
, info
, p
);
529 if (unpack_index_entry(ce
, o
) < 0)
530 return unpack_failed(o
, NULL
);
536 * If we skip unmerged index
537 * entries, we'll skip this
538 * entry *and* the tree
539 * entries associated with it!
541 if (o
->skip_unmerged
) {
542 add_same_unmerged(ce
, o
);
552 if (unpack_nondirectories(n
, mask
, dirmask
, src
, names
, info
) < 0)
556 if (ce_stage(src
[0]))
557 mark_ce_used_same_name(src
[0], o
);
559 mark_ce_used(src
[0], o
);
562 /* Now handle any directories.. */
564 unsigned long conflicts
= mask
& ~dirmask
;
571 /* special case: "diff-index --cached" looking at a tree */
572 if (o
->diff_index_cached
&&
573 n
== 1 && dirmask
== 1 && S_ISDIR(names
->mode
)) {
575 matches
= cache_tree_matches_traversal(o
->src_index
->cache_tree
,
578 * Everything under the name matches; skip the
579 * entire hierarchy. diff_index_cached codepath
580 * special cases D/F conflicts in such a way that
581 * it does not do any look-ahead, so this is safe.
584 o
->cache_bottom
+= matches
;
589 if (traverse_trees_recursive(n
, dirmask
, conflicts
,
/*
 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 */
/*
 * NOTE(review): garbled listing — the function's braces, several guards
 * (o->merge/o->prefix checks, loop conditions) and return statements are
 * missing; comments are hedged accordingly.  Visible structure: set up
 * the shared checkout 'state' and o->result, reset CE_UNPACKED bits,
 * install the shared df_conflict_entry, traverse the trees with
 * unpack_callback (splicing leftover index entries before/after the
 * traversal when a prefix is used), then apply updates via
 * check_updates() and publish o->result to *o->dst_index.
 */
602 int unpack_trees(unsigned len
, struct tree_desc
*t
, struct unpack_trees_options
*o
)
605 static struct cache_entry
*dfc
;
607 if (len
> MAX_UNPACK_TREES
)
608 die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES
);
609 memset(&state
, 0, sizeof(state
));
613 state
.refresh_cache
= 1;
615 memset(&o
->result
, 0, sizeof(o
->result
));
616 o
->result
.initialized
= 1;
617 o
->result
.timestamp
.sec
= o
->src_index
->timestamp
.sec
;
618 o
->result
.timestamp
.nsec
= o
->src_index
->timestamp
.nsec
;
620 mark_all_ce_unused(o
->src_index
);
623 dfc
= xcalloc(1, cache_entry_size(0));
624 o
->df_conflict_entry
= dfc
;
627 const char *prefix
= o
->prefix
? o
->prefix
: "";
628 struct traverse_info info
;
630 setup_traverse_info(&info
, prefix
);
631 info
.fn
= unpack_callback
;
636 * Unpack existing index entries that sort before the
637 * prefix the tree is spliced into. Note that o->merge
638 * is always true in this case.
641 struct cache_entry
*ce
= next_cache_entry(o
);
644 if (ce_in_traverse_path(ce
, &info
))
646 if (unpack_index_entry(ce
, o
) < 0)
651 if (traverse_trees(len
, t
, &info
) < 0)
655 /* Any left-over entries in the index? */
658 struct cache_entry
*ce
= next_cache_entry(o
);
661 if (unpack_index_entry(ce
, o
) < 0)
665 mark_all_ce_unused(o
->src_index
);
667 if (o
->trivial_merges_only
&& o
->nontrivial_merge
)
668 return unpack_failed(o
, "Merge requires file-level merging");
671 ret
= check_updates(o
) ? (-2) : 0;
673 *o
->dst_index
= o
->result
;
677 mark_all_ce_unused(o
->src_index
);
678 return unpack_failed(o
, NULL
);
681 /* Here come the merge functions */
683 static int reject_merge(struct cache_entry
*ce
, struct unpack_trees_options
*o
)
685 return error(ERRORMSG(o
, would_overwrite
), ce
->name
);
688 static int same(struct cache_entry
*a
, struct cache_entry
*b
)
694 return a
->ce_mode
== b
->ce_mode
&&
695 !hashcmp(a
->sha1
, b
->sha1
);
700 * When a CE gets turned into an unmerged entry, we
701 * want it to be up-to-date
703 static int verify_uptodate(struct cache_entry
*ce
,
704 struct unpack_trees_options
*o
)
708 if (o
->index_only
|| o
->reset
|| ce_uptodate(ce
))
711 if (!lstat(ce
->name
, &st
)) {
712 unsigned changed
= ie_match_stat(o
->src_index
, ce
, &st
, CE_MATCH_IGNORE_VALID
);
716 * NEEDSWORK: the current default policy is to allow
717 * submodule to be out of sync wrt the supermodule
718 * index. This needs to be tightened later for
719 * submodules that are marked to be automatically
722 if (S_ISGITLINK(ce
->ce_mode
))
728 return o
->gently
? -1 :
729 error(ERRORMSG(o
, not_uptodate_file
), ce
->name
);
732 static void invalidate_ce_path(struct cache_entry
*ce
, struct unpack_trees_options
*o
)
735 cache_tree_invalidate_path(o
->src_index
->cache_tree
, ce
->name
);
/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(struct cache_entry *ce, const char *action,
				  struct unpack_trees_options *o)
{
	/* Per the comment above: nothing is written, so nothing can be lost. */
	return 0;
}
/*
 * NOTE(review): garbled listing — braces, loop increments, declarations
 * (pathbuf, struct dir_struct d, namelen, i) and several returns are
 * missing; comments are hedged accordingly.
 * We are about to extract "ce->name" as a file where a directory now
 * sits: make sure every tracked entry under that directory is clean
 * (verify_uptodate) and scheduled for removal, then scan the directory
 * for non-ignored untracked files that would be lost.
 */
751 static int verify_clean_subdirectory(struct cache_entry
*ce
, const char *action
,
752 struct unpack_trees_options
*o
)
755 * we are about to extract "ce->name"; we would not want to lose
756 * anything in the existing directory there.
763 unsigned char sha1
[20];
/* Gitlink special case: an up-to-date submodule checkout is fine as-is. */
765 if (S_ISGITLINK(ce
->ce_mode
) &&
766 resolve_gitlink_ref(ce
->name
, "HEAD", sha1
) == 0) {
767 /* If we are not going to update the submodule, then
770 if (!hashcmp(sha1
, ce
->sha1
))
772 return verify_clean_submodule(ce
, action
, o
);
776 * First let's make sure we do not have a local modification
779 namelen
= strlen(ce
->name
);
780 for (i
= locate_in_src_index(ce
, o
);
781 i
< o
->src_index
->cache_nr
;
783 struct cache_entry
*ce2
= o
->src_index
->cache
[i
];
784 int len
= ce_namelen(ce2
);
786 strncmp(ce
->name
, ce2
->name
, namelen
) ||
787 ce2
->name
[namelen
] != '/')
790 * ce2->name is an entry in the subdirectory to be
793 if (!ce_stage(ce2
)) {
794 if (verify_uptodate(ce2
, o
))
796 add_entry(o
, ce2
, CE_REMOVE
, 0);
797 mark_ce_used(ce2
, o
);
803 * Then we need to make sure that we do not lose a locally
804 * present file that is not ignored.
806 pathbuf
= xmalloc(namelen
+ 2);
807 memcpy(pathbuf
, ce
->name
, namelen
);
808 strcpy(pathbuf
+namelen
, "/");
810 memset(&d
, 0, sizeof(d
));
812 d
.exclude_per_dir
= o
->dir
->exclude_per_dir
;
813 i
= read_directory(&d
, pathbuf
, namelen
+1, NULL
);
815 return o
->gently
? -1 :
816 error(ERRORMSG(o
, not_uptodate_dir
), ce
->name
);
822 * This gets called when there was no index entry for the tree entry 'dst',
823 * but we found a file in the working tree that 'lstat()' said was fine,
824 * and we're on a case-insensitive filesystem.
826 * See if we can find a case-insensitive match in the index that also
827 * matches the stat information, and assume it's that other file!
829 static int icase_exists(struct unpack_trees_options
*o
, struct cache_entry
*dst
, struct stat
*st
)
831 struct cache_entry
*src
;
833 src
= index_name_exists(o
->src_index
, dst
->name
, ce_namelen(dst
), 1);
834 return src
&& !ie_match_stat(o
->src_index
, src
, st
, CE_MATCH_IGNORE_VALID
);
/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
/*
 * NOTE(review): garbled listing — braces, several "return 0;" lines and
 * the declarations around the lstat branch are missing; comments are
 * hedged accordingly.  Returns non-zero (after reporting
 * would_lose_untracked) when an untracked, non-ignored working tree file
 * at ce->name would be clobbered by 'action'.
 */
841 static int verify_absent(struct cache_entry
*ce
, const char *action
,
842 struct unpack_trees_options
*o
)
846 if (o
->index_only
|| o
->reset
|| !o
->update
)
849 if (has_symlink_or_noent_leading_path(ce
->name
, ce_namelen(ce
)))
852 if (!lstat(ce
->name
, &st
)) {
853 int dtype
= ce_to_dtype(ce
);
854 struct cache_entry
*result
;
857 * It may be that the 'lstat()' succeeded even though
858 * target 'ce' was absent, because there is an old
859 * entry that is different only in case..
861 * Ignore that lstat() if it matches.
863 if (ignore_case
&& icase_exists(o
, ce
, &st
))
866 if (o
->dir
&& excluded(o
->dir
, ce
->name
, &dtype
))
868 * ce->name is explicitly excluded, so it is Ok to
872 if (S_ISDIR(st
.st_mode
)) {
874 * We are checking out path "foo" and
875 * found "foo/." in the working tree.
876 * This is tricky -- if we have modified
877 * files that are in "foo/" we would lose
880 if (verify_clean_subdirectory(ce
, action
, o
) < 0)
886 * The previous round may already have decided to
887 * delete this path, which is in a subdirectory that
888 * is being replaced with a blob.
890 result
= index_name_exists(&o
->result
, ce
->name
, ce_namelen(ce
), 0);
892 if (result
->ce_flags
& CE_REMOVE
)
896 return o
->gently
? -1 :
897 error(ERRORMSG(o
, would_lose_untracked
), ce
->name
, action
);
902 static int merged_entry(struct cache_entry
*merge
, struct cache_entry
*old
,
903 struct unpack_trees_options
*o
)
905 int update
= CE_UPDATE
;
909 * See if we can re-use the old CE directly?
910 * That way we get the uptodate stat info.
912 * This also removes the UPDATE flag on a match; otherwise
913 * we will end up overwriting local changes in the work tree.
915 if (same(old
, merge
)) {
916 copy_cache_entry(merge
, old
);
919 if (verify_uptodate(old
, o
))
921 invalidate_ce_path(old
, o
);
925 if (verify_absent(merge
, "overwritten", o
))
927 invalidate_ce_path(merge
, o
);
930 add_entry(o
, merge
, update
, CE_STAGEMASK
);
934 static int deleted_entry(struct cache_entry
*ce
, struct cache_entry
*old
,
935 struct unpack_trees_options
*o
)
937 /* Did it exist in the index? */
939 if (verify_absent(ce
, "removed", o
))
943 if (verify_uptodate(old
, o
))
945 add_entry(o
, ce
, CE_REMOVE
, 0);
946 invalidate_ce_path(ce
, o
);
/*
 * Carry 'ce' into the result unchanged.  Returns 1 (the count of
 * entries kept) for the callers that sum keep_entry() results.
 */
static int keep_entry(struct cache_entry *ce, struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}
957 static void show_stage_entry(FILE *o
,
958 const char *label
, const struct cache_entry
*ce
)
961 fprintf(o
, "%s (missing)\n", label
);
963 fprintf(o
, "%s%06o %s %d\t%s\n",
966 sha1_to_hex(ce
->sha1
),
/*
 * NOTE(review): garbled listing — many lines are missing (declarations
 * of count/head_match/i, the any_anc_missing/no_anc_exists updates, the
 * index/head assignments' NULL-ing of D/F placeholders, most closing
 * braces and returns, and the DBRT_DEBUG conditional around the #16
 * warning).  Case numbers (#2, #14ALT, ...) presumably refer to the
 * read-tree three-way-merge table — TODO confirm against
 * Documentation/git-read-tree.txt.  Comments below are hedged.
 * Three-way merge callback: stages[] holds index(0), ancestors(1..),
 * head (o->head_idx) and remote (o->head_idx+1) versions of one path.
 */
972 int threeway_merge(struct cache_entry
**stages
, struct unpack_trees_options
*o
)
974 struct cache_entry
*index
;
975 struct cache_entry
*head
;
976 struct cache_entry
*remote
= stages
[o
->head_idx
+ 1];
979 int remote_match
= 0;
981 int df_conflict_head
= 0;
982 int df_conflict_remote
= 0;
984 int any_anc_missing
= 0;
985 int no_anc_exists
= 1;
988 for (i
= 1; i
< o
->head_idx
; i
++) {
989 if (!stages
[i
] || stages
[i
] == o
->df_conflict_entry
)
996 head
= stages
[o
->head_idx
];
998 if (head
== o
->df_conflict_entry
) {
999 df_conflict_head
= 1;
1003 if (remote
== o
->df_conflict_entry
) {
1004 df_conflict_remote
= 1;
1009 * First, if there's a #16 situation, note that to prevent #13
1012 if (!same(remote
, head
)) {
1013 for (i
= 1; i
< o
->head_idx
; i
++) {
1014 if (same(stages
[i
], head
)) {
1017 if (same(stages
[i
], remote
)) {
1024 * We start with cases where the index is allowed to match
1025 * something other than the head: #14(ALT) and #2ALT, where it
1026 * is permitted to match the result instead.
1028 /* #14, #14ALT, #2ALT */
1029 if (remote
&& !df_conflict_head
&& head_match
&& !remote_match
) {
1030 if (index
&& !same(index
, remote
) && !same(index
, head
))
1031 return o
->gently
? -1 : reject_merge(index
, o
);
1032 return merged_entry(remote
, index
, o
);
1035 * If we have an entry in the index cache, then we want to
1036 * make sure that it matches head.
1038 if (index
&& !same(index
, head
))
1039 return o
->gently
? -1 : reject_merge(index
, o
);
1043 if (same(head
, remote
))
1044 return merged_entry(head
, index
, o
);
1046 if (!df_conflict_remote
&& remote_match
&& !head_match
)
1047 return merged_entry(head
, index
, o
);
1051 if (!head
&& !remote
&& any_anc_missing
)
1055 * Under the "aggressive" rule, we resolve mostly trivial
1056 * cases that we historically had git-merge-one-file resolve.
1058 if (o
->aggressive
) {
1059 int head_deleted
= !head
;
1060 int remote_deleted
= !remote
;
1061 struct cache_entry
*ce
= NULL
;
1070 for (i
= 1; i
< o
->head_idx
; i
++) {
1071 if (stages
[i
] && stages
[i
] != o
->df_conflict_entry
) {
1080 * Deleted in one and unchanged in the other.
1082 if ((head_deleted
&& remote_deleted
) ||
1083 (head_deleted
&& remote
&& remote_match
) ||
1084 (remote_deleted
&& head
&& head_match
)) {
1086 return deleted_entry(index
, index
, o
);
1087 if (ce
&& !head_deleted
) {
1088 if (verify_absent(ce
, "removed", o
))
1094 * Added in both, identically.
1096 if (no_anc_exists
&& head
&& remote
&& same(head
, remote
))
1097 return merged_entry(head
, index
, o
);
1101 /* Below are "no merge" cases, which require that the index be
1102 * up-to-date to avoid the files getting overwritten with
1103 * conflict resolution files.
1106 if (verify_uptodate(index
, o
))
1110 o
->nontrivial_merge
= 1;
1112 /* #2, #3, #4, #6, #7, #9, #10, #11. */
1114 if (!head_match
|| !remote_match
) {
1115 for (i
= 1; i
< o
->head_idx
; i
++) {
1116 if (stages
[i
] && stages
[i
] != o
->df_conflict_entry
) {
1117 keep_entry(stages
[i
], o
);
1125 fprintf(stderr
, "read-tree: warning #16 detected\n");
1126 show_stage_entry(stderr
, "head ", stages
[head_match
]);
1127 show_stage_entry(stderr
, "remote ", stages
[remote_match
]);
1130 if (head
) { count
+= keep_entry(head
, o
); }
1131 if (remote
) { count
+= keep_entry(remote
, o
); }
/*
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense. For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 */
/*
 * NOTE(review): garbled listing — braces, the "%d" argument to error(),
 * the case-number branch conditions between the visible returns, and
 * several returns are missing; comments are hedged accordingly.
 * Two-way merge callback: src[0] = current index entry, src[1] = old
 * tree, src[2] = new tree.  Numeric comments (4 and 5, 14 and 15, ...)
 * presumably refer to the read-tree two-way merge table.
 */
1144 int twoway_merge(struct cache_entry
**src
, struct unpack_trees_options
*o
)
1146 struct cache_entry
*current
= src
[0];
1147 struct cache_entry
*oldtree
= src
[1];
1148 struct cache_entry
*newtree
= src
[2];
1150 if (o
->merge_size
!= 2)
1151 return error("Cannot do a twoway merge of %d trees",
1154 if (oldtree
== o
->df_conflict_entry
)
1156 if (newtree
== o
->df_conflict_entry
)
1160 if ((!oldtree
&& !newtree
) || /* 4 and 5 */
1161 (!oldtree
&& newtree
&&
1162 same(current
, newtree
)) || /* 6 and 7 */
1163 (oldtree
&& newtree
&&
1164 same(oldtree
, newtree
)) || /* 14 and 15 */
1165 (oldtree
&& newtree
&&
1166 !same(oldtree
, newtree
) && /* 18 and 19 */
1167 same(current
, newtree
))) {
1168 return keep_entry(current
, o
);
1170 else if (oldtree
&& !newtree
&& same(current
, oldtree
)) {
1172 return deleted_entry(oldtree
, current
, o
);
1174 else if (oldtree
&& newtree
&&
1175 same(current
, oldtree
) && !same(current
, newtree
)) {
1177 return merged_entry(newtree
, current
, o
);
1180 /* all other failures */
1182 return o
->gently
? -1 : reject_merge(oldtree
, o
);
1184 return o
->gently
? -1 : reject_merge(current
, o
);
1186 return o
->gently
? -1 : reject_merge(newtree
, o
);
/* No current entry below this point (creation / staged-deletion cases). */
1191 if (oldtree
&& !o
->initial_checkout
) {
1193 * deletion of the path was staged;
1195 if (same(oldtree
, newtree
))
1197 return reject_merge(oldtree
, o
);
1199 return merged_entry(newtree
, current
, o
);
1201 return deleted_entry(oldtree
, current
, o
);
1207 * Keep the index entries at stage0, collapse stage1 but make sure
1208 * stage0 does not have anything there.
1210 int bind_merge(struct cache_entry
**src
,
1211 struct unpack_trees_options
*o
)
1213 struct cache_entry
*old
= src
[0];
1214 struct cache_entry
*a
= src
[1];
1216 if (o
->merge_size
!= 1)
1217 return error("Cannot do a bind merge of %d trees\n",
1220 return o
->gently
? -1 :
1221 error(ERRORMSG(o
, bind_overlap
), a
->name
, old
->name
);
1223 return keep_entry(old
, o
);
1225 return merged_entry(a
, NULL
, o
);
1232 * - take the stat information from stage0, take the data from stage1
1234 int oneway_merge(struct cache_entry
**src
, struct unpack_trees_options
*o
)
1236 struct cache_entry
*old
= src
[0];
1237 struct cache_entry
*a
= src
[1];
1239 if (o
->merge_size
!= 1)
1240 return error("Cannot do a oneway merge of %d trees",
1243 if (!a
|| a
== o
->df_conflict_entry
)
1244 return deleted_entry(old
, old
, o
);
1246 if (old
&& same(old
, a
)) {
1248 if (o
->reset
&& !ce_uptodate(old
)) {
1250 if (lstat(old
->name
, &st
) ||
1251 ie_match_stat(o
->src_index
, old
, &st
, CE_MATCH_IGNORE_VALID
))
1252 update
|= CE_UPDATE
;
1254 add_entry(o
, old
, update
, 0);
1257 return merged_entry(a
, old
, o
);