/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include "cache-tree.h"
static int update = 0;
static int index_only = 0;
static int nontrivial_merge = 0;
static int trivial_merges_only = 0;
static int aggressive = 0;
static int verbose_update = 0;
static volatile int progress_update = 0;

static int head_idx = -1;
static int merge_size = 0;

static struct object_list *trees = NULL;
static struct cache_entry df_conflict_entry = {
};

static struct tree_entry_list df_conflict_list = {
        .next = &df_conflict_list
};
typedef int (*merge_fn_t)(struct cache_entry **src);
static int entcmp(char *name1, int dir1, char *name2, int dir2)
{
        int len1 = strlen(name1);
        int len2 = strlen(name2);
        int len = len1 < len2 ? len1 : len2;
        int ret = memcmp(name1, name2, len);
        unsigned char c1, c2;
        if (ret)
                return ret;
        c1 = name1[len];
        c2 = name2[len];
        if (!c1 && dir1)
                c1 = '/';
        if (!c2 && dir2)
                c2 = '/';
        ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
        if (c1 && c2 && !ret)
                ret = len1 - len2;
        return ret;
}
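/*
 * Example (illustrative): given a directory "foo" and a file "foo.c",
 * entcmp("foo", 1, "foo.c", 0) compares "foo/" against "foo.c"; since
 * '/' (0x2f) sorts after '.' (0x2e) the result is positive, so "foo.c"
 * is taken first -- the same order the index uses.
 */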
static int unpack_trees_rec(struct tree_entry_list **posns, int len,
                            const char *base, merge_fn_t fn, int *indpos)
{
        int baselen = strlen(base);
        int src_size = len + 1;
        do {
                int i;
                char *first = NULL;
                int firstdir = 0;
                int pathlen;
                unsigned ce_size;
                char *cache_name = NULL;
                struct tree_entry_list **subposns;
                struct cache_entry **src;
                /* Find the first name in the input. */

                if (merge && *indpos < active_nr) {
                        /* This is a bit tricky: */
                        /* If the index has a subdirectory (with
                         * contents) as the first name, it'll get a
                         * filename like "foo/bar". But that's after
                         * "foo", so the entry in trees will get
                         * handled first, at which point we'll go into
                         * "foo", and deal with "bar" from the index,
                         * because the base will be "foo/". The only
                         * way we can actually have "foo/bar" first of
                         * all the things is if the trees don't
                         * contain "foo" at all, in which case we'll
                         * handle "foo/bar" without going into the
                         * directory, but that's fine (and will return
                         * an error anyway, with the added unknown
                         * files as the error).
                         */

                        cache_name = active_cache[*indpos]->name;
                        if (strlen(cache_name) > baselen &&
                            !memcmp(cache_name, base, baselen)) {
                                cache_name += baselen;
                                first = cache_name;
                        } else
                                cache_name = NULL;
                }
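                /*
                 * Example (illustrative): while unpacking the subdirectory
                 * "foo" the base is "foo/", so an index entry "foo/bar" has
                 * the base stripped here and competes as plain "bar" against
                 * the tree entries of "foo".
                 */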
                if (first)
                        printf("index %s\n", first);
                for (i = 0; i < len; i++) {
                        if (!posns[i] || posns[i] == &df_conflict_list)
                                continue;
                        printf("%d %s\n", i + 1, posns[i]->name);
                        if (!first || entcmp(first, firstdir,
                                             posns[i]->name,
                                             posns[i]->directory) > 0) {
                                first = posns[i]->name;
                                firstdir = posns[i]->directory;
                        }
                }
                /* No name means we're done */
                if (!first)
                        return 0;

                pathlen = strlen(first);
                ce_size = cache_entry_size(baselen + pathlen);

                src = xcalloc(src_size, sizeof(struct cache_entry *));

                subposns = xcalloc(len, sizeof(struct tree_entry_list *));

                if (cache_name && !strcmp(cache_name, first)) {
                        src[0] = active_cache[*indpos];
                        remove_cache_entry_at(*indpos);
                }
                for (i = 0; i < len; i++) {
                        struct cache_entry *ce;
                        int stage;

                        if (!posns[i] ||
                            (posns[i] != &df_conflict_list &&
                             strcmp(first, posns[i]->name))) {
                                continue;
                        }

                        if (posns[i] == &df_conflict_list) {
                                src[i + merge] = &df_conflict_entry;
                                continue;
                        }

                        if (posns[i]->directory) {
                                parse_tree(posns[i]->item.tree);
                                subposns[i] = posns[i]->item.tree->entries;
                                posns[i] = posns[i]->next;
                                src[i + merge] = &df_conflict_entry;
                                continue;
                        }

                        if (!merge)
                                stage = 0;
                        else if (i + 1 < head_idx)
                                stage = 1;
                        else if (i + 1 > head_idx)
                                stage = 3;
                        else
                                stage = 2;

                        ce = xcalloc(1, ce_size);
                        ce->ce_mode = create_ce_mode(posns[i]->mode);
                        ce->ce_flags = create_ce_flags(baselen + pathlen,
                                                       stage);
                        memcpy(ce->name, base, baselen);
                        memcpy(ce->name + baselen, first, pathlen + 1);

                        memcpy(ce->sha1, posns[i]->item.any->sha1, 20);
                        src[i + merge] = ce;
                        subposns[i] = &df_conflict_list;
                        posns[i] = posns[i]->next;
                }
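                /*
                 * Example (illustrative): in a three-way merge head_idx is 2,
                 * so the common ancestor (i + 1 == 1) is put at stage 1, HEAD
                 * (i + 1 == 2) at stage 2, and the other tree (i + 1 == 3) at
                 * stage 3; without -m everything is created at stage 0.
                 */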
                if (merge) {
                        int ret;

                        printf("%s:\n", first);
                        for (i = 0; i < src_size; i++) {
                                if (src[i])
                                        printf("%s\n", sha1_to_hex(src[i]->sha1));
                        }

                        ret = fn(src);

                        printf("Added %d entries\n", ret);
                        *indpos += ret;
                } else {
                        for (i = 0; i < src_size; i++) {
                                if (src[i])
                                        add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
                        }
                }
                char *newbase = xmalloc(baselen + 2 + pathlen);
                memcpy(newbase, base, baselen);
                memcpy(newbase + baselen, first, pathlen);
                newbase[baselen + pathlen] = '/';
                newbase[baselen + pathlen + 1] = '\0';
                if (unpack_trees_rec(subposns, len, newbase, fn,
                                     indpos))
                        return -1;
        } while (1);
}
static void reject_merge(struct cache_entry *ce)
{
        die("Entry '%s' would be overwritten by merge. Cannot merge.",
            ce->name);
}
/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(char *name)
{
        char *cp, *prev;

        if (unlink(name))
                return;
        prev = NULL;
        while (1) {
                int status;
                cp = strrchr(name, '/');
                if (prev)
                        *prev = '/';
                if (!cp)
                        break;

                *cp = 0;
                status = rmdir(name);
                if (status) {
                        *cp = '/';
                        break;
                }
                prev = cp;
        }
}
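/*
 * Example (illustrative): unlink_entry("a/b/c") unlinks the file, then
 * tries rmdir("a/b") and, if that succeeds, rmdir("a"), stopping at the
 * first parent directory that is not empty.
 */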
static void progress_interval(int signum)
{
        progress_update = 1;
}

static void setup_progress_signal(void)
{
        struct sigaction sa;
        struct itimerval v;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = progress_interval;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = SA_RESTART;
        sigaction(SIGALRM, &sa, NULL);

        v.it_interval.tv_sec = 1;
        v.it_interval.tv_usec = 0;
        v.it_value = v.it_interval;
        setitimer(ITIMER_REAL, &v, NULL);
}
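/*
 * Illustrative note: the one-second ITIMER_REAL tick sets
 * progress_update, which check_updates() below polls so that the
 * "% done" line is refreshed at least once per second even when the
 * percentage itself has not changed.
 */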
static void check_updates(struct cache_entry **src, int nr)
{
        static struct checkout state = {
                .base_dir = "",
                .force = 1,
                .quiet = 1,
                .refresh_cache = 1,
        };
        unsigned short mask = htons(CE_UPDATE);
        unsigned last_percent = 200, cnt = 0, total = 0;

        if (update && verbose_update) {
                for (total = cnt = 0; cnt < nr; cnt++) {
                        struct cache_entry *ce = src[cnt];
                        if (!ce->ce_mode || ce->ce_flags & mask)
                                total++;
                }

                /* Don't bother doing this for very small updates */
                if (total < 250)
                        total = 0;

                if (total) {
                        fprintf(stderr, "Checking files out...\n");
                        setup_progress_signal();
                        progress_update = 1;
                }
                cnt = 0;
        }

        while (nr--) {
                struct cache_entry *ce = *src++;

                if (total) {
                        if (!ce->ce_mode || ce->ce_flags & mask) {
                                unsigned percent;
                                cnt++;
                                percent = (cnt * 100) / total;
                                if (percent != last_percent ||
                                    progress_update) {
                                        fprintf(stderr, "%4u%% (%u/%u) done\r",
                                                percent, cnt, total);
                                        last_percent = percent;
                                        progress_update = 0;
                                }
                        }
                }
                if (!ce->ce_mode) {
                        if (update)
                                unlink_entry(ce->name);
                        continue;
                }
                if (ce->ce_flags & mask) {
                        ce->ce_flags &= ~mask;
                        if (update)
                                checkout_entry(ce, &state, NULL);
                }
        }
        if (total) {
                signal(SIGALRM, SIG_IGN);
                fputc('\n', stderr);
        }
}
static int unpack_trees(merge_fn_t fn)
{
        int indpos = 0;
        unsigned len = object_list_length(trees);
        struct tree_entry_list **posns;
        int i;
        struct object_list *posn = trees;
        merge_size = len;

        posns = xmalloc(len * sizeof(struct tree_entry_list *));
        for (i = 0; i < len; i++) {
                posns[i] = ((struct tree *) posn->item)->entries;
                posn = posn->next;
        }
        if (unpack_trees_rec(posns, len, "", fn, &indpos))
                return -1;

        if (trivial_merges_only && nontrivial_merge)
                die("Merge requires file-level merging");

        check_updates(active_cache, active_nr);
        return 0;
}
static int list_tree(unsigned char *sha1)
{
        struct tree *tree = parse_tree_indirect(sha1);
        if (!tree)
                return -1;
        object_list_append(&tree->object, &trees);
        return 0;
}
static int same(struct cache_entry *a, struct cache_entry *b)
{
        if (!!a != !!b)
                return 0;
        if (!a && !b)
                return 1;
        return a->ce_mode == b->ce_mode &&
               !memcmp(a->sha1, b->sha1, 20);
}
/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static void verify_uptodate(struct cache_entry *ce)
{
        struct stat st;

        if (index_only || reset)
                return;

        if (!lstat(ce->name, &st)) {
                unsigned changed = ce_match_stat(ce, &st, 1);
                if (!changed)
                        return;
                errno = 0;
        }
        if (reset) {
                ce->ce_flags |= htons(CE_UPDATE);
                return;
        }
        if (errno == ENOENT)
                return;
        die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}
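/*
 * Example (illustrative): running a "-m -u" merge while a file such as
 * "Makefile" carries local modifications not recorded in the index makes
 * ce_match_stat() report a change, and the merge dies here rather than
 * clobbering the working tree file.
 */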
static void invalidate_ce_path(struct cache_entry *ce)
{
        if (ce)
                cache_tree_invalidate_path(active_cache_tree, ce->name);
}
/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked.
 */
static void verify_absent(const char *path, const char *action)
{
        struct stat st;

        if (index_only || reset || !update)
                return;
        if (!lstat(path, &st))
                die("Untracked working tree file '%s' "
                    "would be %s by merge.", path, action);
}
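/*
 * Example (illustrative): if the merge wants to create "hello.c" but an
 * untracked "hello.c" already exists in the working tree, the lstat()
 * succeeds and we die instead of silently overwriting or removing it.
 */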
static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
{
        merge->ce_flags |= htons(CE_UPDATE);
        if (old) {
                /*
                 * See if we can re-use the old CE directly?
                 * That way we get the uptodate stat info.
                 *
                 * This also removes the UPDATE flag on
                 * a match.
                 */
                if (same(old, merge)) {
                        *merge = *old;
                } else {
                        verify_uptodate(old);
                        invalidate_ce_path(old);
                }
        }
        else {
                verify_absent(merge->name, "overwritten");
                invalidate_ce_path(merge);
        }

        merge->ce_flags &= ~htons(CE_STAGEMASK);
        add_cache_entry(merge, ADD_CACHE_OK_TO_ADD);
        return 1;
}
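/*
 * Example (illustrative): on a fast-forward where a path is identical in
 * the old index and in the merge result, the old entry is re-used, so its
 * cached stat information survives and check_updates() does not need to
 * rewrite the file.
 */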
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
{
        if (old)
                verify_uptodate(old);
        else
                verify_absent(ce->name, "removed");
        ce->ce_mode = 0;
        add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
        invalidate_ce_path(ce);
        return 1;
}
static int keep_entry(struct cache_entry *ce)
{
        add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
        return 1;
}
static void show_stage_entry(FILE *o,
                             const char *label, const struct cache_entry *ce)
{
        if (!ce)
                fprintf(o, "%s (missing)\n", label);
        else
                fprintf(o, "%s%06o %s %d\t%s\n",
                        label,
                        ntohs(ce->ce_mode),
                        sha1_to_hex(ce->sha1),
                        ce_stage(ce),
                        ce->name);
}
static int threeway_merge(struct cache_entry **stages)
{
        struct cache_entry *index;
        struct cache_entry *head;
        struct cache_entry *remote = stages[head_idx + 1];
        int count;
        int head_match = 0;
        int remote_match = 0;
        const char *path = NULL;
        int i;

        int df_conflict_head = 0;
        int df_conflict_remote = 0;

        int any_anc_missing = 0;
        int no_anc_exists = 1;

        for (i = 1; i < head_idx; i++) {
                if (!stages[i])
                        any_anc_missing = 1;
                else {
                        if (!path)
                                path = stages[i]->name;
                        no_anc_exists = 0;
                }
        }

        index = stages[0];
        head = stages[head_idx];

        if (head == &df_conflict_entry) {
                df_conflict_head = 1;
                head = NULL;
        }

        if (remote == &df_conflict_entry) {
                df_conflict_remote = 1;
                remote = NULL;
        }

        if (!path && index)
                path = index->name;
        if (!path && head)
                path = head->name;
        if (!path && remote)
                path = remote->name;

        /* First, if there's a #16 situation, note that to prevent #13
         * and #14.
         */
        if (!same(remote, head)) {
                for (i = 1; i < head_idx; i++) {
                        if (same(stages[i], head)) {
                                head_match = i;
                        }
                        if (same(stages[i], remote)) {
                                remote_match = i;
                        }
                }
        }

        /* We start with cases where the index is allowed to match
         * something other than the head: #14(ALT) and #2ALT, where it
         * is permitted to match the result instead.
         */
        /* #14, #14ALT, #2ALT */
        if (remote && !df_conflict_head && head_match && !remote_match) {
                if (index && !same(index, remote) && !same(index, head))
                        reject_merge(index);
                return merged_entry(remote, index);
        }
        /*
         * If we have an entry in the index cache, then we want to
         * make sure that it matches head.
         */
        if (index && !same(index, head)) {
                reject_merge(index);
        }

        if (head) {
                if (same(head, remote))
                        return merged_entry(head, index);
                if (!df_conflict_remote && remote_match && !head_match)
                        return merged_entry(head, index);
        }

        if (!head && !remote && any_anc_missing)
                return 0;

        /* Under the new "aggressive" rule, we resolve mostly trivial
         * cases that we historically had git-merge-one-file resolve.
         */
        if (aggressive) {
                int head_deleted = !head && !df_conflict_head;
                int remote_deleted = !remote && !df_conflict_remote;
                /*
                 * Deleted in both.
                 * Deleted in one and unchanged in the other.
                 */
                if ((head_deleted && remote_deleted) ||
                    (head_deleted && remote && remote_match) ||
                    (remote_deleted && head && head_match)) {
                        if (index)
                                return deleted_entry(index, index);
                        verify_absent(path, "removed");
                        return 0;
                }
                /*
                 * Added in both, identically.
                 */
                if (no_anc_exists && head && remote && same(head, remote))
                        return merged_entry(head, index);
        }

        /* Below are "no merge" cases, which require that the index be
         * up-to-date to avoid the files getting overwritten with
         * conflict resolution files.
         */
        if (index) {
                verify_uptodate(index);
        }
        else if (path)
                verify_absent(path, "overwritten");

        nontrivial_merge = 1;

        /* #2, #3, #4, #6, #7, #9, #11. */
        count = 0;
        if (!head_match || !remote_match) {
                for (i = 1; i < head_idx; i++) {
                        if (stages[i]) {
                                keep_entry(stages[i]);
                                count++;
                                break;
                        }
                }
        }
        else {
                fprintf(stderr, "read-tree: warning #16 detected\n");
                show_stage_entry(stderr, "head ", stages[head_match]);
                show_stage_entry(stderr, "remote ", stages[remote_match]);
        }
        if (head) { count += keep_entry(head); }
        if (remote) { count += keep_entry(remote); }
        return count;
}
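/*
 * Worked example (illustrative) for the #-numbered cases above: with
 * --aggressive, a path deleted in the other tree but unchanged in HEAD
 * relative to an ancestor is resolved by deleted_entry() without any
 * file-level merge; if HEAD and the other tree changed the path in
 * different ways, we fall through to the "no merge" code, keep the
 * higher-stage entries, and leave the content merge to
 * git-merge-one-file.
 */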
/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 */
static int twoway_merge(struct cache_entry **src)
{
        struct cache_entry *current = src[0];
        struct cache_entry *oldtree = src[1], *newtree = src[2];

        if (merge_size != 2)
                return error("Cannot do a twoway merge of %d trees",
                             merge_size);

        if (current) {
                if ((!oldtree && !newtree) || /* 4 and 5 */
                    (!oldtree && newtree &&
                     same(current, newtree)) || /* 6 and 7 */
                    (oldtree && newtree &&
                     same(oldtree, newtree)) || /* 14 and 15 */
                    (oldtree && newtree &&
                     !same(oldtree, newtree) && /* 18 and 19 */
                     same(current, newtree))) {
                        return keep_entry(current);
                }
                else if (oldtree && !newtree && same(current, oldtree)) {
                        return deleted_entry(oldtree, current);
                }
                else if (oldtree && newtree &&
                         same(current, oldtree) && !same(current, newtree)) {
                        return merged_entry(newtree, current);
                }
                else {
                        /* all other failures */
                        if (oldtree)
                                reject_merge(oldtree);
                        if (current)
                                reject_merge(current);
                        if (newtree)
                                reject_merge(newtree);
                        return -1;
                }
        }
        else if (newtree)
                return merged_entry(newtree, current);
        return deleted_entry(oldtree, current);
}
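/*
 * Example (illustrative): a plain fast-forward, where the index entry
 * matches the old tree and only the new tree changed, takes the new
 * tree's entry via merged_entry(); an index entry matching neither tree
 * hits reject_merge() instead, so local changes are never silently
 * overwritten.
 */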
/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
static int oneway_merge(struct cache_entry **src)
{
        struct cache_entry *old = src[0];
        struct cache_entry *a = src[1];

        if (merge_size != 1)
                return error("Cannot do a oneway merge of %d trees",
                             merge_size);

        if (!a)
                return deleted_entry(old, old);
        if (old && same(old, a)) {
                if (reset) {
                        struct stat st;
                        if (lstat(old->name, &st) ||
                            ce_match_stat(old, &st, 1))
                                old->ce_flags |= htons(CE_UPDATE);
                }
                return keep_entry(old);
        }
        return merged_entry(a, old);
}
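/*
 * Example (illustrative): "git-read-tree -m <tree>" (or --reset) ends up
 * here for every path: entries whose mode and sha1 already match the
 * index keep their cached stat information via keep_entry(), everything
 * else is replaced by the tree's version via merged_entry(), and paths
 * missing from the tree are dropped via deleted_entry().
 */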
static int read_cache_unmerged(void)
{
        int i, deleted;
        struct cache_entry **dst;

        read_cache();
        dst = active_cache;
        deleted = 0;
        for (i = 0; i < active_nr; i++) {
                struct cache_entry *ce = active_cache[i];
                if (ce_stage(ce)) {
                        deleted++;
                        invalidate_ce_path(ce);
                        continue;
                }
                if (deleted)
                        *dst = ce;
                dst++;
        }
        active_nr -= deleted;
        return deleted;
}
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
        struct tree_entry_list *ent;
        int cnt;

        memcpy(it->sha1, tree->object.sha1, 20);
        for (cnt = 0, ent = tree->entries; ent; ent = ent->next) {
                if (!ent->directory)
                        cnt++;
                else {
                        struct cache_tree_sub *sub;
                        struct tree *subtree = (struct tree *)ent->item.tree;
                        if (!subtree->object.parsed)
                                parse_tree(subtree);
                        sub = cache_tree_sub(it, ent->name);
                        sub->cache_tree = cache_tree();
                        prime_cache_tree_rec(sub->cache_tree, subtree);
                        cnt += sub->cache_tree->entry_count;
                }
        }
        it->entry_count = cnt;
}
static void prime_cache_tree(void)
{
        struct tree *tree = (struct tree *)trees->item;
        if (!tree)
                return;
        active_cache_tree = cache_tree();
        prime_cache_tree_rec(active_cache_tree, tree);
}
static const char read_tree_usage[] = "git-read-tree (<sha> | -m [--aggressive] [-u | -i] <sha1> [<sha2> [<sha3>]])";
static struct cache_file cache_file;
int main(int argc, char **argv)
{
        int i, newfd, stage = 0;
        unsigned char sha1[20];
        merge_fn_t fn = NULL;

        setup_git_directory();
        git_config(git_default_config);

        newfd = hold_index_file_for_update(&cache_file, get_index_file());
        if (newfd < 0)
                die("unable to create new cachefile");

        git_config(git_default_config);
= 1; i
< argc
; i
++) {
829 const char *arg
= argv
[i
];
831 /* "-u" means "update", meaning that a merge will update
834 if (!strcmp(arg
, "-u")) {
839 if (!strcmp(arg
, "-v")) {
844 /* "-i" means "index only", meaning that a merge will
845 * not even look at the working tree.
847 if (!strcmp(arg
, "-i")) {
852 /* This differs from "-m" in that we'll silently ignore unmerged entries */
853 if (!strcmp(arg
, "--reset")) {
855 usage(read_tree_usage
);
859 read_cache_unmerged();
863 if (!strcmp(arg
, "--trivial")) {
864 trivial_merges_only
= 1;
868 if (!strcmp(arg
, "--aggressive")) {
873 /* "-m" stands for "merge", meaning we start in stage 1 */
874 if (!strcmp(arg
, "-m")) {
876 usage(read_tree_usage
);
877 if (read_cache_unmerged())
878 die("you need to resolve your current index first");
884 /* using -u and -i at the same time makes no sense */
885 if (1 < index_only
+ update
)
886 usage(read_tree_usage
);
888 if (get_sha1(arg
, sha1
))
889 die("Not a valid object name %s", arg
);
890 if (list_tree(sha1
) < 0)
891 die("failed to unpack tree object %s", arg
);
894 if ((update
||index_only
) && !merge
)
895 usage(read_tree_usage
);
        if (merge) {
                if (stage < 2)
                        die("just how do you expect me to merge %d trees?", stage - 1);
                switch (stage - 1) {
                case 1:
                        fn = oneway_merge;
                        break;
                case 2:
                        fn = twoway_merge;
                        break;
                default:
                        fn = threeway_merge;
                        break;
                }
                cache_tree_free(&active_cache_tree);

                if (stage - 1 >= 3)
                        head_idx = stage - 2;
                else
                        head_idx = 1;
        }

        unpack_trees(fn);
        /*
         * When reading only one tree (either the most basic form,
         * "-m ent" or "--reset ent" form), we can obtain a fully
         * valid cache-tree because the index must match exactly
         * what came from the tree.
         */
        if (trees && trees->item && (!merge || (stage == 2))) {
                cache_tree_free(&active_cache_tree);
                prime_cache_tree();
        }
        if (write_cache(newfd, active_cache, active_nr) ||
            commit_index_file(&cache_file))
                die("unable to write new index file");
        return 0;
}