/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <arpa/inet.h>

#include "cache.h"
#include "object.h"
#include "tree.h"
#include "cache-tree.h"
18 static int update
= 0;
19 static int index_only
= 0;
20 static int nontrivial_merge
= 0;
21 static int trivial_merges_only
= 0;
22 static int aggressive
= 0;
23 static int verbose_update
= 0;
24 static volatile int progress_update
= 0;
25 static const char *prefix
= NULL
;
27 static int head_idx
= -1;
28 static int merge_size
= 0;
30 static struct object_list
*trees
= NULL
;
32 static struct cache_entry df_conflict_entry
= {
35 static struct tree_entry_list df_conflict_list
= {
37 .next
= &df_conflict_list
40 typedef int (*merge_fn_t
)(struct cache_entry
**src
);
42 static int entcmp(char *name1
, int dir1
, char *name2
, int dir2
)
44 int len1
= strlen(name1
);
45 int len2
= strlen(name2
);
46 int len
= len1
< len2
? len1
: len2
;
47 int ret
= memcmp(name1
, name2
, len
);
57 ret
= (c1
< c2
) ? -1 : (c1
> c2
) ? 1 : 0;
63 static int unpack_trees_rec(struct tree_entry_list
**posns
, int len
,
64 const char *base
, merge_fn_t fn
, int *indpos
)
66 int baselen
= strlen(base
);
67 int src_size
= len
+ 1;
74 struct tree_entry_list
**subposns
;
75 struct cache_entry
**src
;
81 /* Find the first name in the input. */
87 if (merge
&& *indpos
< active_nr
) {
88 /* This is a bit tricky: */
89 /* If the index has a subdirectory (with
90 * contents) as the first name, it'll get a
91 * filename like "foo/bar". But that's after
92 * "foo", so the entry in trees will get
93 * handled first, at which point we'll go into
94 * "foo", and deal with "bar" from the index,
95 * because the base will be "foo/". The only
96 * way we can actually have "foo/bar" first of
97 * all the things is if the trees don't
98 * contain "foo" at all, in which case we'll
99 * handle "foo/bar" without going into the
100 * directory, but that's fine (and will return
101 * an error anyway, with the added unknown
105 cache_name
= active_cache
[*indpos
]->name
;
106 if (strlen(cache_name
) > baselen
&&
107 !memcmp(cache_name
, base
, baselen
)) {
108 cache_name
+= baselen
;
117 printf("index %s\n", first
);
119 for (i
= 0; i
< len
; i
++) {
120 if (!posns
[i
] || posns
[i
] == &df_conflict_list
)
123 printf("%d %s\n", i
+ 1, posns
[i
]->name
);
125 if (!first
|| entcmp(first
, firstdir
,
127 posns
[i
]->directory
) > 0) {
128 first
= posns
[i
]->name
;
129 firstdir
= posns
[i
]->directory
;
132 /* No name means we're done */
136 pathlen
= strlen(first
);
137 ce_size
= cache_entry_size(baselen
+ pathlen
);
139 src
= xcalloc(src_size
, sizeof(struct cache_entry
*));
141 subposns
= xcalloc(len
, sizeof(struct tree_list_entry
*));
143 if (cache_name
&& !strcmp(cache_name
, first
)) {
145 src
[0] = active_cache
[*indpos
];
146 remove_cache_entry_at(*indpos
);
149 for (i
= 0; i
< len
; i
++) {
150 struct cache_entry
*ce
;
153 (posns
[i
] != &df_conflict_list
&&
154 strcmp(first
, posns
[i
]->name
))) {
158 if (posns
[i
] == &df_conflict_list
) {
159 src
[i
+ merge
] = &df_conflict_entry
;
163 if (posns
[i
]->directory
) {
165 parse_tree(posns
[i
]->item
.tree
);
166 subposns
[i
] = posns
[i
]->item
.tree
->entries
;
167 posns
[i
] = posns
[i
]->next
;
168 src
[i
+ merge
] = &df_conflict_entry
;
174 else if (i
+ 1 < head_idx
)
176 else if (i
+ 1 > head_idx
)
181 ce
= xcalloc(1, ce_size
);
182 ce
->ce_mode
= create_ce_mode(posns
[i
]->mode
);
183 ce
->ce_flags
= create_ce_flags(baselen
+ pathlen
,
185 memcpy(ce
->name
, base
, baselen
);
186 memcpy(ce
->name
+ baselen
, first
, pathlen
+ 1);
190 memcpy(ce
->sha1
, posns
[i
]->item
.any
->sha1
, 20);
192 subposns
[i
] = &df_conflict_list
;
193 posns
[i
] = posns
[i
]->next
;
200 printf("%s:\n", first
);
201 for (i
= 0; i
< src_size
; i
++) {
204 printf("%s\n", sha1_to_hex(src
[i
]->sha1
));
212 printf("Added %d entries\n", ret
);
216 for (i
= 0; i
< src_size
; i
++) {
218 add_cache_entry(src
[i
], ADD_CACHE_OK_TO_ADD
|ADD_CACHE_SKIP_DFCHECK
);
224 char *newbase
= xmalloc(baselen
+ 2 + pathlen
);
225 memcpy(newbase
, base
, baselen
);
226 memcpy(newbase
+ baselen
, first
, pathlen
);
227 newbase
[baselen
+ pathlen
] = '/';
228 newbase
[baselen
+ pathlen
+ 1] = '\0';
229 if (unpack_trees_rec(subposns
, len
, newbase
, fn
,
239 static void reject_merge(struct cache_entry
*ce
)
241 die("Entry '%s' would be overwritten by merge. Cannot merge.",
245 /* Unlink the last component and attempt to remove leading
246 * directories, in case this unlink is the removal of the
247 * last entry in the directory -- empty directories are removed.
249 static void unlink_entry(char *name
)
258 cp
= strrchr(name
, '/');
265 status
= rmdir(name
);
274 static void progress_interval(int signum
)
279 static void setup_progress_signal(void)
284 memset(&sa
, 0, sizeof(sa
));
285 sa
.sa_handler
= progress_interval
;
286 sigemptyset(&sa
.sa_mask
);
287 sa
.sa_flags
= SA_RESTART
;
288 sigaction(SIGALRM
, &sa
, NULL
);
290 v
.it_interval
.tv_sec
= 1;
291 v
.it_interval
.tv_usec
= 0;
292 v
.it_value
= v
.it_interval
;
293 setitimer(ITIMER_REAL
, &v
, NULL
);
296 static void check_updates(struct cache_entry
**src
, int nr
)
298 static struct checkout state
= {
304 unsigned short mask
= htons(CE_UPDATE
);
305 unsigned last_percent
= 200, cnt
= 0, total
= 0;
307 if (update
&& verbose_update
) {
308 for (total
= cnt
= 0; cnt
< nr
; cnt
++) {
309 struct cache_entry
*ce
= src
[cnt
];
310 if (!ce
->ce_mode
|| ce
->ce_flags
& mask
)
314 /* Don't bother doing this for very small updates */
319 fprintf(stderr
, "Checking files out...\n");
320 setup_progress_signal();
327 struct cache_entry
*ce
= *src
++;
330 if (!ce
->ce_mode
|| ce
->ce_flags
& mask
) {
333 percent
= (cnt
* 100) / total
;
334 if (percent
!= last_percent
||
336 fprintf(stderr
, "%4u%% (%u/%u) done\r",
337 percent
, cnt
, total
);
338 last_percent
= percent
;
344 unlink_entry(ce
->name
);
347 if (ce
->ce_flags
& mask
) {
348 ce
->ce_flags
&= ~mask
;
350 checkout_entry(ce
, &state
, NULL
);
354 signal(SIGALRM
, SIG_IGN
);
359 static int unpack_trees(merge_fn_t fn
)
362 unsigned len
= object_list_length(trees
);
363 struct tree_entry_list
**posns
;
365 struct object_list
*posn
= trees
;
369 posns
= xmalloc(len
* sizeof(struct tree_entry_list
*));
370 for (i
= 0; i
< len
; i
++) {
371 posns
[i
] = ((struct tree
*) posn
->item
)->entries
;
374 if (unpack_trees_rec(posns
, len
, prefix
? prefix
: "",
379 if (trivial_merges_only
&& nontrivial_merge
)
380 die("Merge requires file-level merging");
382 check_updates(active_cache
, active_nr
);
386 static int list_tree(unsigned char *sha1
)
388 struct tree
*tree
= parse_tree_indirect(sha1
);
391 object_list_append(&tree
->object
, &trees
);
395 static int same(struct cache_entry
*a
, struct cache_entry
*b
)
401 return a
->ce_mode
== b
->ce_mode
&&
402 !memcmp(a
->sha1
, b
->sha1
, 20);
407 * When a CE gets turned into an unmerged entry, we
408 * want it to be up-to-date
410 static void verify_uptodate(struct cache_entry
*ce
)
414 if (index_only
|| reset
)
417 if (!lstat(ce
->name
, &st
)) {
418 unsigned changed
= ce_match_stat(ce
, &st
, 1);
424 ce
->ce_flags
|= htons(CE_UPDATE
);
429 die("Entry '%s' not uptodate. Cannot merge.", ce
->name
);
432 static void invalidate_ce_path(struct cache_entry
*ce
)
435 cache_tree_invalidate_path(active_cache_tree
, ce
->name
);
439 * We do not want to remove or overwrite a working tree file that
442 static void verify_absent(const char *path
, const char *action
)
446 if (index_only
|| reset
|| !update
)
448 if (!lstat(path
, &st
))
449 die("Untracked working tree file '%s' "
450 "would be %s by merge.", path
, action
);
453 static int merged_entry(struct cache_entry
*merge
, struct cache_entry
*old
)
455 merge
->ce_flags
|= htons(CE_UPDATE
);
458 * See if we can re-use the old CE directly?
459 * That way we get the uptodate stat info.
461 * This also removes the UPDATE flag on
464 if (same(old
, merge
)) {
467 verify_uptodate(old
);
468 invalidate_ce_path(old
);
472 verify_absent(merge
->name
, "overwritten");
473 invalidate_ce_path(merge
);
476 merge
->ce_flags
&= ~htons(CE_STAGEMASK
);
477 add_cache_entry(merge
, ADD_CACHE_OK_TO_ADD
);
481 static int deleted_entry(struct cache_entry
*ce
, struct cache_entry
*old
)
484 verify_uptodate(old
);
486 verify_absent(ce
->name
, "removed");
488 add_cache_entry(ce
, ADD_CACHE_OK_TO_ADD
);
489 invalidate_ce_path(ce
);
493 static int keep_entry(struct cache_entry
*ce
)
495 add_cache_entry(ce
, ADD_CACHE_OK_TO_ADD
);
500 static void show_stage_entry(FILE *o
,
501 const char *label
, const struct cache_entry
*ce
)
504 fprintf(o
, "%s (missing)\n", label
);
506 fprintf(o
, "%s%06o %s %d\t%s\n",
509 sha1_to_hex(ce
->sha1
),
515 static int threeway_merge(struct cache_entry
**stages
)
517 struct cache_entry
*index
;
518 struct cache_entry
*head
;
519 struct cache_entry
*remote
= stages
[head_idx
+ 1];
522 int remote_match
= 0;
523 const char *path
= NULL
;
525 int df_conflict_head
= 0;
526 int df_conflict_remote
= 0;
528 int any_anc_missing
= 0;
529 int no_anc_exists
= 1;
532 for (i
= 1; i
< head_idx
; i
++) {
537 path
= stages
[i
]->name
;
543 head
= stages
[head_idx
];
545 if (head
== &df_conflict_entry
) {
546 df_conflict_head
= 1;
550 if (remote
== &df_conflict_entry
) {
551 df_conflict_remote
= 1;
562 /* First, if there's a #16 situation, note that to prevent #13
565 if (!same(remote
, head
)) {
566 for (i
= 1; i
< head_idx
; i
++) {
567 if (same(stages
[i
], head
)) {
570 if (same(stages
[i
], remote
)) {
576 /* We start with cases where the index is allowed to match
577 * something other than the head: #14(ALT) and #2ALT, where it
578 * is permitted to match the result instead.
580 /* #14, #14ALT, #2ALT */
581 if (remote
&& !df_conflict_head
&& head_match
&& !remote_match
) {
582 if (index
&& !same(index
, remote
) && !same(index
, head
))
584 return merged_entry(remote
, index
);
587 * If we have an entry in the index cache, then we want to
588 * make sure that it matches head.
590 if (index
&& !same(index
, head
)) {
596 if (same(head
, remote
))
597 return merged_entry(head
, index
);
599 if (!df_conflict_remote
&& remote_match
&& !head_match
)
600 return merged_entry(head
, index
);
604 if (!head
&& !remote
&& any_anc_missing
)
607 /* Under the new "aggressive" rule, we resolve mostly trivial
608 * cases that we historically had git-merge-one-file resolve.
611 int head_deleted
= !head
&& !df_conflict_head
;
612 int remote_deleted
= !remote
&& !df_conflict_remote
;
615 * Deleted in one and unchanged in the other.
617 if ((head_deleted
&& remote_deleted
) ||
618 (head_deleted
&& remote
&& remote_match
) ||
619 (remote_deleted
&& head
&& head_match
)) {
621 return deleted_entry(index
, index
);
623 verify_absent(path
, "removed");
627 * Added in both, identically.
629 if (no_anc_exists
&& head
&& remote
&& same(head
, remote
))
630 return merged_entry(head
, index
);
634 /* Below are "no merge" cases, which require that the index be
635 * up-to-date to avoid the files getting overwritten with
636 * conflict resolution files.
639 verify_uptodate(index
);
642 verify_absent(path
, "overwritten");
644 nontrivial_merge
= 1;
646 /* #2, #3, #4, #6, #7, #9, #11. */
648 if (!head_match
|| !remote_match
) {
649 for (i
= 1; i
< head_idx
; i
++) {
651 keep_entry(stages
[i
]);
659 fprintf(stderr
, "read-tree: warning #16 detected\n");
660 show_stage_entry(stderr
, "head ", stages
[head_match
]);
661 show_stage_entry(stderr
, "remote ", stages
[remote_match
]);
664 if (head
) { count
+= keep_entry(head
); }
665 if (remote
) { count
+= keep_entry(remote
); }
672 * The rule is to "carry forward" what is in the index without losing
673 * information across a "fast forward", favoring a successful merge
674 * over a merge failure when it makes sense. For details of the
675 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
678 static int twoway_merge(struct cache_entry
**src
)
680 struct cache_entry
*current
= src
[0];
681 struct cache_entry
*oldtree
= src
[1], *newtree
= src
[2];
684 return error("Cannot do a twoway merge of %d trees",
688 if ((!oldtree
&& !newtree
) || /* 4 and 5 */
689 (!oldtree
&& newtree
&&
690 same(current
, newtree
)) || /* 6 and 7 */
691 (oldtree
&& newtree
&&
692 same(oldtree
, newtree
)) || /* 14 and 15 */
693 (oldtree
&& newtree
&&
694 !same(oldtree
, newtree
) && /* 18 and 19*/
695 same(current
, newtree
))) {
696 return keep_entry(current
);
698 else if (oldtree
&& !newtree
&& same(current
, oldtree
)) {
700 return deleted_entry(oldtree
, current
);
702 else if (oldtree
&& newtree
&&
703 same(current
, oldtree
) && !same(current
, newtree
)) {
705 return merged_entry(newtree
, current
);
708 /* all other failures */
710 reject_merge(oldtree
);
712 reject_merge(current
);
714 reject_merge(newtree
);
719 return merged_entry(newtree
, current
);
721 return deleted_entry(oldtree
, current
);
727 * Keep the index entries at stage0, collapse stage1 but make sure
728 * stage0 does not have anything there.
730 static int bind_merge(struct cache_entry
**src
)
732 struct cache_entry
*old
= src
[0];
733 struct cache_entry
*a
= src
[1];
736 return error("Cannot do a bind merge of %d trees\n",
739 die("Entry '%s' overlaps. Cannot bind.", a
->name
);
741 return keep_entry(old
);
743 return merged_entry(a
, NULL
);
750 * - take the stat information from stage0, take the data from stage1
752 static int oneway_merge(struct cache_entry
**src
)
754 struct cache_entry
*old
= src
[0];
755 struct cache_entry
*a
= src
[1];
758 return error("Cannot do a oneway merge of %d trees",
762 invalidate_ce_path(old
);
763 return deleted_entry(old
, old
);
765 if (old
&& same(old
, a
)) {
768 if (lstat(old
->name
, &st
) ||
769 ce_match_stat(old
, &st
, 1))
770 old
->ce_flags
|= htons(CE_UPDATE
);
772 return keep_entry(old
);
774 return merged_entry(a
, old
);
777 static int read_cache_unmerged(void)
780 struct cache_entry
**dst
;
785 for (i
= 0; i
< active_nr
; i
++) {
786 struct cache_entry
*ce
= active_cache
[i
];
789 invalidate_ce_path(ce
);
796 active_nr
-= deleted
;
800 static void prime_cache_tree_rec(struct cache_tree
*it
, struct tree
*tree
)
802 struct tree_entry_list
*ent
;
805 memcpy(it
->sha1
, tree
->object
.sha1
, 20);
806 for (cnt
= 0, ent
= tree
->entries
; ent
; ent
= ent
->next
) {
810 struct cache_tree_sub
*sub
;
811 struct tree
*subtree
= (struct tree
*)ent
->item
.tree
;
812 if (!subtree
->object
.parsed
)
814 sub
= cache_tree_sub(it
, ent
->name
);
815 sub
->cache_tree
= cache_tree();
816 prime_cache_tree_rec(sub
->cache_tree
, subtree
);
817 cnt
+= sub
->cache_tree
->entry_count
;
820 it
->entry_count
= cnt
;
823 static void prime_cache_tree(void)
825 struct tree
*tree
= (struct tree
*)trees
->item
;
828 active_cache_tree
= cache_tree();
829 prime_cache_tree_rec(active_cache_tree
, tree
);
833 static const char read_tree_usage
[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";
835 static struct cache_file cache_file
;
837 int main(int argc
, char **argv
)
839 int i
, newfd
, stage
= 0;
840 unsigned char sha1
[20];
841 merge_fn_t fn
= NULL
;
843 setup_git_directory();
844 git_config(git_default_config
);
846 newfd
= hold_index_file_for_update(&cache_file
, get_index_file());
848 die("unable to create new cachefile");
850 git_config(git_default_config
);
854 for (i
= 1; i
< argc
; i
++) {
855 const char *arg
= argv
[i
];
857 /* "-u" means "update", meaning that a merge will update
860 if (!strcmp(arg
, "-u")) {
865 if (!strcmp(arg
, "-v")) {
870 /* "-i" means "index only", meaning that a merge will
871 * not even look at the working tree.
873 if (!strcmp(arg
, "-i")) {
878 /* "--prefix=<subdirectory>/" means keep the current index
879 * entries and put the entries from the tree under the
880 * given subdirectory.
882 if (!strncmp(arg
, "--prefix=", 9)) {
883 if (stage
|| merge
|| prefix
)
884 usage(read_tree_usage
);
888 if (read_cache_unmerged())
889 die("you need to resolve your current index first");
893 /* This differs from "-m" in that we'll silently ignore unmerged entries */
894 if (!strcmp(arg
, "--reset")) {
895 if (stage
|| merge
|| prefix
)
896 usage(read_tree_usage
);
900 read_cache_unmerged();
904 if (!strcmp(arg
, "--trivial")) {
905 trivial_merges_only
= 1;
909 if (!strcmp(arg
, "--aggressive")) {
914 /* "-m" stands for "merge", meaning we start in stage 1 */
915 if (!strcmp(arg
, "-m")) {
916 if (stage
|| merge
|| prefix
)
917 usage(read_tree_usage
);
918 if (read_cache_unmerged())
919 die("you need to resolve your current index first");
925 /* using -u and -i at the same time makes no sense */
926 if (1 < index_only
+ update
)
927 usage(read_tree_usage
);
929 if (get_sha1(arg
, sha1
))
930 die("Not a valid object name %s", arg
);
931 if (list_tree(sha1
) < 0)
932 die("failed to unpack tree object %s", arg
);
935 if ((update
||index_only
) && !merge
)
936 usage(read_tree_usage
);
939 int pfxlen
= strlen(prefix
);
941 if (prefix
[pfxlen
-1] != '/')
942 die("prefix must end with /");
944 die("binding merge takes only one tree");
945 pos
= cache_name_pos(prefix
, pfxlen
);
947 die("corrupt index file");
949 if (pos
< active_nr
&&
950 !strncmp(active_cache
[pos
]->name
, prefix
, pfxlen
))
951 die("subdirectory '%s' already exists.", prefix
);
952 pos
= cache_name_pos(prefix
, pfxlen
-1);
954 die("file '%.*s' already exists.", pfxlen
-1, prefix
);
959 die("just how do you expect me to merge %d trees?", stage
-1);
962 fn
= prefix
? bind_merge
: oneway_merge
;
970 cache_tree_free(&active_cache_tree
);
975 head_idx
= stage
- 2;
983 * When reading only one tree (either the most basic form,
984 * "-m ent" or "--reset ent" form), we can obtain a fully
985 * valid cache-tree because the index must match exactly
986 * what came from the tree.
988 if (trees
&& trees
->item
&& (!merge
|| (stage
== 2))) {
989 cache_tree_free(&active_cache_tree
);
993 if (write_cache(newfd
, active_cache
, active_nr
) ||
994 commit_index_file(&cache_file
))
995 die("unable to write new index file");