#include "repository.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "split-index.h"
#include "sparse-index.h"
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
#include "object-store.h"
#include "promisor-remote.h"
#include "parallel-checkout.h"
/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree. Non-scripted Porcelain is not required to use these messages
 * and is in fact encouraged to reword them to better suit its particular
 * situation. See how "git checkout" and "git merge" replace them using
 * setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_WARNING_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_CWD_IN_THE_WAY */
	"Refusing to remove '%s' since it is the current working directory.",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",

	/* NB_UNPACK_TREES_ERROR_TYPES; just a meta value */
	"",

	/* WARNING_SPARSE_NOT_UPTODATE_FILE */
	"Path '%s' not uptodate; will not remove from working tree.",

	/* WARNING_SPARSE_UNMERGED_FILE */
	"Path '%s' unmerged; will not remove from working tree.",

	/* WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN */
	"Path '%s' already present; will not overwrite with sparse update.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)])      \
	  : (unpack_plumbing_errors[(type)]) )
static const char *super_prefixed(const char *path, const char *super_prefix)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	strvec_init(&opts->msgs_to_free);

	if (!strcmp(cmd, "checkout"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	msgs[ERROR_CWD_IN_THE_WAY] =
		_("Refusing to remove the current working directory:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] =
		strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_enabled(ADVICE_COMMIT_BEFORE_MERGE)
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] =
		strvec_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	msgs[WARNING_SPARSE_NOT_UPTODATE_FILE] =
		_("The following paths are not up to date and were left despite sparse patterns:\n%s");
	msgs[WARNING_SPARSE_UNMERGED_FILE] =
		_("The following paths are unmerged and were left despite sparse patterns:\n%s");
	msgs[WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN] =
		_("The following paths were already present and thus not updated despite sparse patterns:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}
void clear_unpack_trees_porcelain(struct unpack_trees_options *opts)
{
	strvec_clear(&opts->msgs_to_free);
	memset(opts->msgs, 0, sizeof(opts->msgs));
}
static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}
/*
 * Add an error message for path <path> of error type <e>, to be shown
 * either immediately or batched for later display, depending on whether
 * porcelain-style output (o->show_all_errors) was requested.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (o->quiet)
		return -1;

	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path,
							    o->super_prefix));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_(error|warning)_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}
/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e;
	unsigned error_displayed = 0;

	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];

		if (rejects->nr > 0) {
			int i;
			struct strbuf path = STRBUF_INIT;

			error_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf,
							     o->super_prefix));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (error_displayed)
		fprintf(stderr, _("Aborting\n"));
}
/*
 * display all the warning messages stored in a nice way
 */
static void display_warning_msgs(struct unpack_trees_options *o)
{
	int e;
	unsigned warning_displayed = 0;

	for (e = NB_UNPACK_TREES_ERROR_TYPES + 1;
	     e < NB_UNPACK_TREES_WARNING_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];

		if (rejects->nr > 0) {
			int i;
			struct strbuf path = STRBUF_INIT;

			warning_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			warning(ERRORMSG(o, e), super_prefixed(path.buf,
							       o->super_prefix));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (warning_displayed)
		fprintf(stderr, _("After fixing the above paths, you may want to run `git sparse-checkout reapply`.\n"));
}
static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);

	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

	if (submodule_move_head(ce->name, o->super_prefix, old_id, new_id,
				flags))
		return add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}
/*
 * Perform the loading of the repository's gitmodules file. This function is
 * used by 'check_updates()' to load the gitmodules file in two different
 * situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal. This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update. This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository, 0);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free(the_repository);
			checkout_entry(ce, state, NULL, NULL);
			repo_read_gitmodules(the_repository, 0);
		}
	}
}
static struct progress *get_progress(struct unpack_trees_options *o,
				     struct index_state *index)
{
	unsigned cnt = 0, total = 0;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Updating files"), total);
}
static void setup_collided_checkout_detection(struct checkout *state,
					      struct index_state *index)
{
	int i;

	state->clone = 1;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~CE_MATCHED;
}
static void report_collided_checkout(struct index_state *index)
{
	struct string_list list = STRING_LIST_INIT_NODUP;
	int i;

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (!(ce->ce_flags & CE_MATCHED))
			continue;

		string_list_append(&list, ce->name);
		ce->ce_flags &= ~CE_MATCHED;
	}

	list.cmp = fspathcmp;
	string_list_sort(&list);

	if (list.nr) {
		warning(_("the following paths have collided (e.g. case-sensitive paths\n"
			  "on a case-insensitive filesystem) and only one from the same\n"
			  "colliding group is in the working tree:\n"));

		for (i = 0; i < list.nr; i++)
			fprintf(stderr, "  '%s'\n", list.items[i].string);
	}

	string_list_clear(&list, 0);
}
static int must_checkout(const struct cache_entry *ce)
{
	return ce->ce_flags & CE_UPDATE;
}
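/*
 * Apply the update/removal decisions recorded in the index entries'
 * CE_UPDATE and CE_WT_REMOVE flags to the working tree: removals first,
 * then checkouts (optionally parallelized), with progress reporting and
 * collision detection for fresh clones.
 */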
static int check_updates(struct unpack_trees_options *o,
			 struct index_state *index)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress;
	struct checkout state = CHECKOUT_INIT;
	int i, pc_workers, pc_threshold;

	trace_performance_enter();
	state.super_prefix = o->super_prefix;
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;
	clone_checkout_metadata(&state.meta, &o->meta, NULL);

	if (!o->update || o->dry_run) {
		remove_marked_cache_entries(index, 0);
		trace_performance_leave("check_updates");
		return 0;
	}

	if (o->clone)
		setup_collided_checkout_detection(&state, index);

	progress = get_progress(o, index);

	/* Start with clean cache to avoid using any possibly outdated info. */
	invalidate_lstat_cache();

	git_attr_set_direction(GIT_ATTR_CHECKOUT);

	if (should_update_submodules())
		load_gitmodules_file(index, NULL);

	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			unlink_entry(ce, o->super_prefix);
		}
	}

	remove_marked_cache_entries(index, 0);
	remove_scheduled_dirs();

	if (should_update_submodules())
		load_gitmodules_file(index, &state);

	if (has_promisor_remote())
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		prefetch_cache_entries(index, must_checkout);

	get_parallel_checkout_configs(&pc_workers, &pc_threshold);

	enable_delayed_checkout(&state);
	if (pc_workers > 1)
		init_parallel_checkout();
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (must_checkout(ce)) {
			size_t last_pc_queue_size = pc_queue_size();

			if (ce->ce_flags & CE_WT_REMOVE)
				BUG("both update and delete flags are set on %s",
				    ce->name);
			ce->ce_flags &= ~CE_UPDATE;
			errs |= checkout_entry(ce, &state, NULL, NULL);

			if (last_pc_queue_size == pc_queue_size())
				display_progress(progress, ++cnt);
		}
	}
	if (pc_workers > 1)
		errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
					      progress, &cnt);
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state, o->verbose_update);
	git_attr_set_direction(GIT_ATTR_CHECKIN);

	if (o->clone)
		report_collided_checkout(index);

	trace_performance_leave("check_updates");
	return errs != 0;
}
static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);
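/*
 * Reconcile an entry's CE_SKIP_WORKTREE bit with the freshly computed
 * CE_NEW_SKIP_WORKTREE bit and schedule the matching worktree update or
 * removal. Returns 0 on success, -1 if the entry cannot be sparsified
 * or de-sparsified safely.
 */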
static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		mark_fsmonitor_invalid(istate, ce);
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify worktree if they are already
	 * outside checkout area
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must be called already;
		 * also stat info may have been lost after merged_entry(), so
		 * calling verify_uptodate() again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) &&
		    verify_uptodate_sparse(ce, o)) {
			ce->ce_flags &= ~CE_SKIP_WORKTREE;
			return -1;
		}
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}
static int warn_conflicted_path(struct index_state *istate,
				int i,
				struct unpack_trees_options *o)
{
	char *conflicting_path = istate->cache[i]->name;
	int count = 0;

	add_rejected_path(o, WARNING_SPARSE_UNMERGED_FILE, conflicting_path);

	/* Find out how many higher stage entries are at same path */
	while ((++count) + i < istate->cache_nr &&
	       !strcmp(conflicting_path, istate->cache[count + i]->name))
		; /* do nothing */

	return count;
}
static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}
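/*
 * Mark a source index entry as consumed by the traversal; if it sits at
 * the current cache_bottom, advance cache_bottom past any run of entries
 * that are already unpacked.
 */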
static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}
static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}
static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}
/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback. Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}
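/* Return the first source index entry not yet marked CE_UNPACKED. */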
static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}
static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}
static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}
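/*
 * Helpers to save and restore o->cache_bottom around a recursive tree
 * traversal; when running "diff-index --cached", cache_bottom is left
 * alone because that codepath manages it itself.
 */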
static int find_cache_pos(struct traverse_info *, const char *p, size_t len);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, info->name, info->namelen);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}
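/* Two tree entries match only if both OIDs are non-null and equal. */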
static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
	return !is_null_oid(&name_j->oid) && !is_null_oid(&name_k->oid) && oideq(&name_j->oid, &name_k->oid);
}

static int all_trees_same_as_cache_tree(int n, unsigned long dirmask,
					struct name_entry *names,
					struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int i;

	if (!o->merge || dirmask != ((1 << n) - 1))
		return 0;

	for (i = 1; i < n; i++)
		if (!are_same_oid(names, names + i))
			return 0;

	return cache_tree_matches_traversal(o->src_index->cache_tree, names, info);
}
static int index_pos_by_traverse_info(struct name_entry *names,
				      struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	struct strbuf name = STRBUF_INIT;
	int pos;

	strbuf_make_traverse_path(&name, info, names->path, names->pathlen);
	strbuf_addch(&name, '/');
	pos = index_name_pos(o->src_index, name.buf, name.len);
	if (pos >= 0) {
		if (!o->src_index->sparse_index ||
		    !(o->src_index->cache[pos]->ce_flags & CE_SKIP_WORKTREE))
			BUG("This is a directory and should not exist in index");
	} else {
		pos = -pos - 1;
	}
	if (pos >= o->src_index->cache_nr ||
	    !starts_with(o->src_index->cache[pos]->name, name.buf) ||
	    (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name.buf)))
		BUG("pos %d doesn't point to the first entry of %s in index",
		    pos, name.buf);
	strbuf_release(&name);
	return pos;
}
/*
 * Fast path if we detect that all trees are the same as cache-tree at this
 * path. We'll walk these trees in an iterative loop using cache-tree/index
 * instead of ODB since we already know what these trees contain.
 */
static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
				  struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	struct cache_entry *tree_ce = NULL;
	int ce_len = 0;
	int i, d;

	if (!o->merge)
		BUG("We need cache-tree to do this optimization");

	/*
	 * Do what unpack_callback() and unpack_single_entry() normally
	 * do. But we walk all paths in an iterative loop instead.
	 *
	 * D/F conflicts and higher stage entries are not a concern
	 * because cache-tree would be invalidated and we would never
	 * get here in the first place.
	 */
	for (i = 0; i < nr_entries; i++) {
		int new_ce_len, len, rc;

		src[0] = o->src_index->cache[pos + i];

		len = ce_namelen(src[0]);
		new_ce_len = cache_entry_size(len);

		if (new_ce_len > ce_len) {
			new_ce_len <<= 1;
			tree_ce = xrealloc(tree_ce, new_ce_len);
			memset(tree_ce, 0, new_ce_len);
			ce_len = new_ce_len;

			tree_ce->ce_flags = create_ce_flags(0);

			for (d = 1; d <= nr_names; d++)
				src[d] = tree_ce;
		}

		tree_ce->ce_mode = src[0]->ce_mode;
		tree_ce->ce_namelen = len;
		oidcpy(&tree_ce->oid, &src[0]->oid);
		memcpy(tree_ce->name, src[0]->name, len + 1);

		rc = call_unpack_fn((const struct cache_entry * const *)src, o);
		if (rc < 0) {
			free(tree_ce);
			return rc;
		}

		mark_ce_used(src[0], o);
	}
	free(tree_ce);
	if (o->debug_unpack)
		printf("Unpacked %d entries from %s to %s using cache-tree\n",
		       nr_entries,
		       o->src_index->cache[pos]->name,
		       o->src_index->cache[pos + nr_entries - 1]->name);
	return 0;
}
static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;
	int nr_entries;

	nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
	if (nr_entries > 0) {
		int pos = index_pos_by_traverse_info(names, info);

		if (!o->merge || df_conflicts)
			BUG("Wrong condition to get here buddy");

		/*
		 * All entries up to 'pos' must have been processed
		 * (i.e. marked CE_UNPACKED) at this point. But to be safe,
		 * save and restore cache_bottom anyway to not miss
		 * unprocessed entries before 'pos'.
		 */
		bottom = o->cache_bottom;
		ret = traverse_by_cache_tree(pos, nr_entries, n, info);
		o->cache_bottom = bottom;
		return ret;
	}

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = p->path;
	newinfo.namelen = p->pathlen;
	newinfo.mode = p->mode;
	newinfo.pathlen = st_add3(newinfo.pathlen, tree_entry_len(p), 1);
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID. This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data. This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const struct object_id *oid = NULL;
			if (dirmask & 1)
				oid = &names[i].oid;
			buf[nr_buf++] = fill_tree_descriptor(the_repository, t + i, oid);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(o->src_index, n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}
/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce,
				      const struct traverse_info *info,
				      const char *name, size_t namelen,
				      unsigned mode)
{
	int pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     info->name, info->namelen,
						     info->mode);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	return df_name_compare(ce_name, ce_len, S_IFREG, name, namelen, mode);
}
static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const char *name, size_t namelen,
			    unsigned mode)
{
	int pathlen, ce_len;
	const char *ce_name;
	int cmp;
	unsigned ce_mode;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so. But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, name, namelen, mode);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	ce_mode = S_ISSPARSEDIR(ce->ce_mode) ? S_IFDIR : S_IFREG;
	return df_name_compare(ce_name, ce_len, ce_mode, name, namelen, mode);
}
static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n->path, n->pathlen, n->mode);
	if (cmp)
		return cmp;

	/*
	 * At this point, we know that we have a prefix match. If ce
	 * is a sparse directory, then allow an exact match. This only
	 * works when the input name is a directory, since ce->name
	 * ends in a directory separator.
	 */
	if (S_ISSPARSEDIR(ce->ce_mode) &&
	    ce->ce_namelen == traverse_path_len(info, tree_entry_len(n)) + 1)
		return 0;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, tree_entry_len(n));
}
static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev,
			     info->name, info->namelen, info->mode))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}
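/*
 * Allocate a cache entry for tree entry 'n' at the current traverse path,
 * either transient (when the result will be discarded by a merge callback)
 * or owned by 'istate'; sparse directory entries get a trailing '/' and
 * CE_SKIP_WORKTREE.
 */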
static struct cache_entry *create_ce_entry(const struct traverse_info *info,
					   const struct name_entry *n,
					   int stage,
					   struct index_state *istate,
					   int is_transient,
					   int is_sparse_directory)
{
	size_t len = traverse_path_len(info, tree_entry_len(n));
	size_t alloc_len = is_sparse_directory ? len + 1 : len;
	struct cache_entry *ce =
		is_transient ?
		make_empty_transient_cache_entry(alloc_len, NULL) :
		make_empty_cache_entry(istate, alloc_len);

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, &n->oid);
	/* len+1 because the cache_entry allocates space for NUL */
	make_traverse_path(ce->name, len + 1, info, n->path, n->pathlen);

	if (is_sparse_directory) {
		ce->name[len] = '/';
		ce->name[len + 1] = '\0';
		ce->ce_namelen++;
		ce->ce_flags |= CE_SKIP_WORKTREE;
	}

	return ce;
}
/*
 * Determine whether the path specified by 'p' should be unpacked as a new
 * sparse directory in a sparse index. A new sparse directory 'A/':
 * - must be outside the sparse cone.
 * - must not already be in the index (i.e., no index entry with name 'A/'
 *   exists).
 * - must not have any child entries in the index (i.e., no index entry
 *   'A/<something>' exists).
 * If 'p' meets the above requirements, return 1; otherwise, return 0.
 */
static int entry_is_new_sparse_dir(const struct traverse_info *info,
				   const struct name_entry *p)
{
	int res, pos;
	struct strbuf dirpath = STRBUF_INIT;
	struct unpack_trees_options *o = info->data;

	if (!S_ISDIR(p->mode))
		return 0;

	/*
	 * If the path is inside the sparse cone, it can't be a sparse directory.
	 */
	strbuf_add(&dirpath, info->traverse_path, info->pathlen);
	strbuf_add(&dirpath, p->path, p->pathlen);
	strbuf_addch(&dirpath, '/');
	if (path_in_cone_mode_sparse_checkout(dirpath.buf, o->src_index)) {
		res = 0;
		goto cleanup;
	}

	pos = index_name_pos_sparse(o->src_index, dirpath.buf, dirpath.len);
	if (pos >= 0) {
		/* Path is already in the index, not a new sparse dir */
		res = 0;
		goto cleanup;
	}

	/* Where would this sparse dir be inserted into the index? */
	pos = -pos - 1;
	if (pos >= o->src_index->cache_nr) {
		/*
		 * Sparse dir would be inserted at the end of the index, so we
		 * know it has no child entries.
		 */
		res = 1;
		goto cleanup;
	}

	/*
	 * If the dir has child entries in the index, the first would be at the
	 * position the sparse directory would be inserted. If the entry at this
	 * position is inside the dir, it is not a new sparse dir.
	 */
	res = strncmp(o->src_index->cache[pos]->name, dirpath.buf, dirpath.len);

cleanup:
	strbuf_release(&dirpath);
	return res;
}
/*
 * Note that traverse_by_cache_tree() duplicates some logic in this function
 * without actually calling it. If you change the logic here you may need to
 * check and change there as well.
 */
static int unpack_single_entry(int n, unsigned long mask,
			       unsigned long dirmask,
			       struct cache_entry **src,
			       const struct name_entry *names,
			       const struct traverse_info *info,
			       int *is_new_sparse_dir)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;
	const struct name_entry *p = names;

	*is_new_sparse_dir = 0;
	if (mask == dirmask && !src[0]) {
		/*
		 * If we're not in a sparse index, we can't unpack a directory
		 * without recursing into it, so we return.
		 */
		if (!o->src_index->sparse_index)
			return 0;

		/* Find first entry with a real name (we could use "mask" too) */
		while (!p->mode)
			p++;

		/*
		 * If the directory is completely missing from the index but
		 * would otherwise be a sparse directory, we should unpack it.
		 * If not, we'll return and continue recursively traversing the
		 * tree.
		 */
		*is_new_sparse_dir = entry_is_new_sparse_dir(info, p);
		if (!*is_new_sparse_dir)
			return 0;
	}

	/*
	 * When we are unpacking a sparse directory, then this isn't necessarily
	 * a directory-file conflict.
	 */
	if (mask == dirmask &&
	    (*is_new_sparse_dir || (src[0] && S_ISSPARSEDIR(src[0]->ce_mode))))
		conflicts = 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}

		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;

		/*
		 * If the merge bit is set, then the cache entries are
		 * discarded in the following block. In this case,
		 * construct "transient" cache_entries, as they are
		 * not stored in the index. Otherwise construct the
		 * cache entry from the index-aware logic.
		 */
		src[i + o->merge] = create_ce_entry(info, names + i, stage,
						    &o->result, o->merge,
						    bit & dirmask);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				discard_cache_entry(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->quiet && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}
/*
 * The tree traversal is looking at name p. If we have a matching entry,
 * return it. If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * it.
 */
static int find_cache_pos(struct traverse_info *info,
			  const char *p, size_t p_len)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g. ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}
/*
 * Given a sparse directory entry 'ce', compare ce->name to
 * info->traverse_path + p->path + '/' if info->traverse_path
 * is non-empty.
 *
 * Compare ce->name to p->path + '/' otherwise. Note that
 * ce->name must end in a trailing '/' because it is a sparse
 * directory entry.
 */
static int sparse_dir_matches_path(const struct cache_entry *ce,
				   struct traverse_info *info,
				   const struct name_entry *p)
{
	assert(S_ISSPARSEDIR(ce->ce_mode));
	assert(ce->name[ce->ce_namelen - 1] == '/');

	if (info->pathlen)
		return ce->ce_namelen == info->pathlen + p->pathlen + 1 &&
		       ce->name[info->pathlen - 1] == '/' &&
		       !strncmp(ce->name, info->traverse_path, info->pathlen) &&
		       !strncmp(ce->name + info->pathlen, p->path, p->pathlen);
	return ce->ce_namelen == p->pathlen + 1 &&
	       !strncmp(ce->name, p->path, p->pathlen);
}
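/*
 * Find the index entry that corresponds to tree entry 'p' at the current
 * traverse path, falling back to a matching sparse-directory entry
 * ("path/") when the plain lookup fails.
 */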
static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	const char *path;
	int pos = find_cache_pos(info, p->path, p->pathlen);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];

	/*
	 * Check for a sparse-directory entry named "path/".
	 * Due to the input p->path not having a trailing
	 * slash, the negative 'pos' value overshoots the
	 * expected position, hence "-2" instead of "-1".
	 */
	pos = -pos - 2;

	if (pos < 0 || pos >= o->src_index->cache_nr)
		return NULL;

	/*
	 * Due to lexicographic sorting and sparse directory
	 * entries ending with a trailing slash, our path as a
	 * sparse directory (e.g "subdir/") and our path as a
	 * file (e.g. "subdir") might be separated by other
	 * paths (e.g. "subdir-").
	 */
	while (pos >= 0) {
		struct cache_entry *ce = o->src_index->cache[pos];

		if (!skip_prefix(ce->name, info->traverse_path, &path) ||
		    strncmp(path, p->path, p->pathlen) ||
		    path[p->pathlen] != '/')
			return NULL;

		if (S_ISSPARSEDIR(ce->ce_mode) &&
		    sparse_dir_matches_path(ce, info, p))
			return ce;

		pos--;
	}

	return NULL;
}
static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name)
			putchar('/');
	}
	printf("%s", info->name);
}

static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}
/*
 * Returns true if and only if the given cache_entry is a
 * sparse-directory entry that matches the given name_entry
 * from the tree walk at the given traverse_info.
 */
static int is_sparse_directory_entry(struct cache_entry *ce,
				     const struct name_entry *name,
				     struct traverse_info *info)
{
	if (!ce || !name || !S_ISSPARSEDIR(ce->ce_mode))
		return 0;

	return sparse_dir_matches_path(ce, info, name);
}
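/*
 * Traversal callback used when merging the contents of a sparse directory
 * entry: names[0] plays the role of the index-side source, so a transient
 * entry is built for it before deferring to unpack_single_entry().
 */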
static int unpack_sparse_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	int ret, is_new_sparse_dir;

	assert(o->merge);

	/*
	 * Unlike in 'unpack_callback', where src[0] is derived from the index when
	 * merging, src[0] is a transient cache entry derived from the first tree
	 * provided. Create the temporary entry as if it came from a non-sparse index.
	 */
	if (!is_null_oid(&names[0].oid)) {
		src[0] = create_ce_entry(info, &names[0], 0,
					 &o->result, 1,
					 dirmask & (1ul << 0));
		src[0]->ce_flags |= (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
	}

	/*
	 * 'unpack_single_entry' assumes that src[0] is derived directly from
	 * the index, rather than from an entry in 'names'. This is *not* true when
	 * merging a sparse directory, in which case names[0] is the "index" source
	 * entry. To match the expectations of 'unpack_single_entry', shift past the
	 * "index" tree (i.e., names[0]) and adjust 'names', 'n', 'mask', and
	 * 'dirmask' accordingly.
	 */
	ret = unpack_single_entry(n - 1, mask >> 1, dirmask >> 1, src, names + 1, info, &is_new_sparse_dir);

	if (src[0])
		discard_cache_entry(src[0]);

	return ret >= 0 ? mask : -1;
}
/*
 * Note that traverse_by_cache_tree() duplicates some logic in this function
 * without actually calling it. If you change the logic here you may need to
 * check and change there as well.
 */
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;
	int is_new_sparse_dir;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_single_entry(n, mask, dirmask, src, names, info, &is_new_sparse_dir))
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy. diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				/*
				 * Only increment the cache_bottom if the
				 * directory isn't a sparse directory index
				 * entry (if it is, it was already incremented
				 * in 'mark_ce_used()').
				 */
				if (!src[0] || !S_ISSPARSEDIR(src[0]->ce_mode))
					o->cache_bottom += matches;
				return mask;
			}
		}

		if (!is_sparse_directory_entry(src[0], p, info) &&
		    !is_new_sparse_dir &&
		    traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0) {
			return -1;
		}

		return mask;
	}

	return mask;
}
static int clear_ce_flags_1(struct index_state *istate,
			    struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct pattern_list *pl,
			    enum pattern_match_result default_match,
			    int progress_nr);
/* Whole directory matching */
static int clear_ce_flags_dir(struct index_state *istate,
			      struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct pattern_list *pl,
			      enum pattern_match_result default_match,
			      int progress_nr)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int rc;
	enum pattern_match_result ret, orig_ret;
	orig_ret = path_matches_pattern_list(prefix->buf, prefix->len,
					     basename, &dtype, pl, istate);

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (orig_ret == UNDECIDED)
		ret = default_match;
	else
		ret = orig_ret;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	if (pl->use_cone_patterns && orig_ret == MATCHED_RECURSIVE) {
		struct cache_entry **ce = cache;
		rc = cache_end - cache;

		while (ce < cache_end) {
			(*ce)->ce_flags &= ~clear_mask;
			ce++;
		}
	} else if (pl->use_cone_patterns && orig_ret == NOT_MATCHED) {
		rc = cache_end - cache;
	} else {
		rc = clear_ce_flags_1(istate, cache, cache_end - cache,
				      prefix,
				      select_mask, clear_mask,
				      pl, ret,
				      progress_nr);
	}

	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}
/*
 * Traverse the index, find every entry that matches according to
 * o->pl. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct index_state *istate,
			    struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct pattern_list *pl,
			    enum pattern_match_result default_match,
			    int progress_nr)
{
	struct cache_entry **cache_end = nr ? cache + nr : cache;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype;
		enum pattern_match_result ret;

		display_progress(istate->progress, progress_nr);

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			progress_nr++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(istate, cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       pl, default_match,
						       progress_nr);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				progress_nr += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = clear_ce_flags_1(istate, cache, cache_end - cache,
						     prefix,
						     select_mask, clear_mask, pl,
						     default_match, progress_nr);

			cache += processed;
			progress_nr += processed;

			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = path_matches_pattern_list(ce->name,
						ce_namelen(ce),
						name, &dtype, pl, istate);
		if (ret == UNDECIDED)
			ret = default_match;
		if (ret == MATCHED || ret == MATCHED_RECURSIVE)
			ce->ce_flags &= ~clear_mask;
		cache++;
		progress_nr++;
	}

	display_progress(istate->progress, progress_nr);
	return nr - (cache_end - cache);
}
static int clear_ce_flags(struct index_state *istate,
			  int select_mask, int clear_mask,
			  struct pattern_list *pl,
			  int show_progress)
{
	static struct strbuf prefix = STRBUF_INIT;
	char label[100];
	int rval;

	strbuf_reset(&prefix);
	if (show_progress)
		istate->progress = start_delayed_progress(
					_("Updating index flags"),
					istate->cache_nr);

	xsnprintf(label, sizeof(label), "clear_ce_flags(0x%08lx,0x%08lx)",
		  (unsigned long)select_mask, (unsigned long)clear_mask);
	trace2_region_enter("unpack_trees", label, the_repository);
	rval = clear_ce_flags_1(istate,
				istate->cache,
				istate->cache_nr,
				&prefix,
				select_mask, clear_mask,
				pl, 0, 0);
	trace2_region_leave("unpack_trees", label, the_repository);

	stop_progress(&istate->progress);
	return rval;
}
/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct pattern_list *pl,
				   struct index_state *istate,
				   int select_flag, int skip_wt_flag,
				   int show_progress)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(istate, select_flag, skip_wt_flag, pl, show_progress);
}
static void populate_from_existing_patterns(struct unpack_trees_options *o,
					    struct pattern_list *pl)
{
	if (get_sparse_checkout_patterns(pl) < 0)
		o->skip_sparse_checkout = 1;
	else
		o->pl = pl;
}
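/*
 * Expand the sparse index when the command's prefix (subdirectory) lies
 * inside or matches a sparse directory entry, so that the entries under
 * that prefix can be examined individually.
 */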
static void update_sparsity_for_prefix(const char *prefix,
				       struct index_state *istate)
{
	int prefix_len = strlen(prefix);
	struct strbuf ce_prefix = STRBUF_INIT;

	if (!istate->sparse_index)
		return;

	while (prefix_len > 0 && prefix[prefix_len - 1] == '/')
		prefix_len--;

	if (prefix_len <= 0)
		BUG("Invalid prefix passed to update_sparsity_for_prefix");

	strbuf_grow(&ce_prefix, prefix_len + 1);
	strbuf_add(&ce_prefix, prefix, prefix_len);
	strbuf_addch(&ce_prefix, '/');

	/*
	 * If the prefix points to a sparse directory or a path inside a sparse
	 * directory, the index should be expanded. This is accomplished in one
	 * of two ways:
	 * - if the prefix is inside a sparse directory, it will be expanded by
	 *   the 'ensure_full_index(...)' call in 'index_name_pos(...)'.
	 * - if the prefix matches an existing sparse directory entry,
	 *   'index_name_pos(...)' will return its index position, triggering
	 *   the 'ensure_full_index(...)' below.
	 */
	if (!path_in_cone_mode_sparse_checkout(ce_prefix.buf, istate) &&
	    index_name_pos(istate, ce_prefix.buf, ce_prefix.len) >= 0)
		ensure_full_index(istate);

	strbuf_release(&ce_prefix);
}
static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	struct repository *repo = the_repository;
	int i, ret;
	static struct cache_entry *dfc;
	struct pattern_list pl;
	int free_pattern_list = 0;
	struct dir_struct dir = DIR_INIT;

	if (o->reset == UNPACK_RESET_INVALID)
		BUG("o->reset had a value of 1; should be UNPACK_TREES_*_UNTRACKED");

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
	if (o->dir)
		BUG("o->dir is for internal use only");

	trace_performance_enter();
	trace2_region_enter("unpack_trees", "unpack_trees", the_repository);

	prepare_repo_settings(repo);
	if (repo->settings.command_requires_full_index) {
		ensure_full_index(o->src_index);
		if (o->dst_index)
			ensure_full_index(o->dst_index);
	}

	if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED &&
	    o->preserve_ignored)
		BUG("UNPACK_RESET_OVERWRITE_UNTRACKED incompatible with preserved ignored files");

	if (!o->preserve_ignored) {
		o->dir = &dir;
		o->dir->flags |= DIR_SHOW_IGNORED;
		setup_standard_excludes(o->dir);
	}

	if (o->prefix)
		update_sparsity_for_prefix(o->prefix, o->src_index);

	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout && !o->pl) {
		memset(&pl, 0, sizeof(pl));
		free_pattern_list = 1;
		populate_from_existing_patterns(o, &pl);
	}

	index_state_init(&o->result, o->src_index->repo);
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	if (!o->src_index->split_index) {
		o->result.split_index = NULL;
	} else if (o->src_index == o->dst_index) {
		/*
		 * o->dst_index (and thus o->src_index) will be discarded
		 * and overwritten with o->result at the end of this function,
		 * so just use src_index's split_index to avoid having to
		 * create a new one.
		 */
		o->result.split_index = o->src_index->split_index;
		o->result.split_index->refcount++;
	} else {
		o->result.split_index = init_split_index(&o->result);
	}
	oidcpy(&o->result.oid, &o->src_index->oid);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	o->result.fsmonitor_last_update =
		xstrdup_or_null(o->src_index->fsmonitor_last_update);
	o->result.fsmonitor_has_run_once = o->src_index->fsmonitor_has_run_once;

	if (!o->src_index->initialized &&
	    !repo->settings.command_requires_full_index &&
	    is_sparse_index_allowed(&o->result, 0))
		o->result.sparse_index = 1;

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->pl, o->src_index, 0,
				       CE_NEW_SKIP_WORKTREE, o->verbose_update);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into. Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		trace_performance_enter();
		trace2_region_enter("unpack_trees", "traverse_trees", the_repository);
		ret = traverse_trees(o->src_index, len, t, &info);
		trace2_region_leave("unpack_trees", "traverse_trees", the_repository);
		trace_performance_leave("traverse_trees");
		if (ret < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->pl, &o->result,
				       CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE,
				       o->verbose_update);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have the
			 * correct CE_NEW_SKIP_WORKTREE
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, WARNING_SPARSE_ORPHANED_NOT_OVERWRITTEN, o))
				ret = 1;

			if (apply_sparse_checkout(&o->result, ce, o))
				ret = 1;
		}
		if (ret == 1) {
			/*
			 * Inability to sparsify or de-sparsify individual
			 * paths is not an error, but just a warning.
			 */
			if (o->show_all_errors)
				display_warning_msgs(o);
			ret = 0;
		}
	}

	ret = check_updates(o, &o->result) ? (-2) : 0;
	if (o->dst_index) {
		move_index_extensions(&o->result, o->src_index);
		if (!ret) {
			if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
				cache_tree_verify(the_repository, &o->result);
			if (!o->skip_cache_tree_update &&
			    !cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}

		o->result.updated_workdir = 1;
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}
	o->src_index = NULL;

done:
	if (free_pattern_list)
		clear_pattern_list(&pl);
	if (o->dir) {
		dir_clear(o->dir);
		o->dir = NULL;
	}
	trace2_region_leave("unpack_trees", "unpack_trees", the_repository);
	trace_performance_leave("unpack_trees");
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}
/*
 * Update SKIP_WORKTREE bits according to sparsity patterns, and update
 * working directory to match.
 *
 * CE_NEW_SKIP_WORKTREE is used internally.
 */
enum update_sparsity_result update_sparsity(struct unpack_trees_options *o)
{
	enum update_sparsity_result ret = UPDATE_SPARSITY_SUCCESS;
	struct pattern_list pl;
	int i;
	unsigned old_show_all_errors;
	int free_pattern_list = 0;

	old_show_all_errors = o->show_all_errors;
	o->show_all_errors = 1;

	/* Sanity checks */
	if (!o->update || o->index_only || o->skip_sparse_checkout)
		BUG("update_sparsity() is for reflecting sparsity patterns in working directory");
	if (o->src_index != o->dst_index || o->fn)
		BUG("update_sparsity() called wrong");

	trace_performance_enter();

	/* If we weren't given patterns, use the recorded ones */
	if (!o->pl) {
		memset(&pl, 0, sizeof(pl));
		free_pattern_list = 1;
		populate_from_existing_patterns(o, &pl);
		if (o->skip_sparse_checkout)
			goto skip_sparse_checkout;
	}

	/* Expand sparse directories as needed */
	expand_index(o->src_index, o->pl);

	/* Set NEW_SKIP_WORKTREE on existing entries. */
	mark_all_ce_unused(o->src_index);
	mark_new_skip_worktree(o->pl, o->src_index, 0,
			       CE_NEW_SKIP_WORKTREE, o->verbose_update);

	/* Then loop over entries and update/remove as needed */
	ret = UPDATE_SPARSITY_SUCCESS;
	for (i = 0; i < o->src_index->cache_nr; i++) {
		struct cache_entry *ce = o->src_index->cache[i];

		if (ce_stage(ce)) {
			/* -1 because for loop will increment by 1 */
			i += warn_conflicted_path(o->src_index, i, o) - 1;
			ret = UPDATE_SPARSITY_WARNINGS;
			continue;
		}

		if (apply_sparse_checkout(o->src_index, ce, o))
			ret = UPDATE_SPARSITY_WARNINGS;
	}

skip_sparse_checkout:
	if (check_updates(o, o->src_index))
		ret = UPDATE_SPARSITY_WORKTREE_UPDATE_FAILURES;

	display_warning_msgs(o);
	o->show_all_errors = old_show_all_errors;
	if (free_pattern_list)
		clear_pattern_list(&pl);
	trace_performance_leave("update_sparsity");
	return ret;
}
/* Here come the merge functions */

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

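/*
 * Two cache entries compare equal when neither carries CE_CONFLICTED
 * and both the mode and the object id match; a missing (NULL) entry
 * only matches another missing entry.
 */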
static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       oideq(&a->oid, &b->oid);
}

/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again
	 * if this entry is truly up-to-date because this file may be
	 * overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);

		if (submodule_from_ce(ce)) {
			int r = check_submodule_move_head(ce,
				"HEAD", oid_to_hex(&ce->oid), o);
			if (r)
				return add_rejected_path(o, error_type, ce->name);
			return 0;
		}

		if (!changed)
			return 0;
		/*
		 * Historic default policy was to allow submodule to be out
		 * of sync wrt the superproject index. If the submodule was
		 * not considered interesting above, we don't care here.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;

		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return add_rejected_path(o, error_type, ce->name);
}

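/*
 * Entries being newly hidden by sparse checkout (both CE_SKIP_WORKTREE
 * and CE_NEW_SKIP_WORKTREE set) are exempt from the up-to-date check;
 * everything else is handled by verify_uptodate_1().
 */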
int verify_uptodate(const struct cache_entry *ce,
		    struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout &&
	    (ce->ce_flags & CE_SKIP_WORKTREE) &&
	    (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, WARNING_SPARSE_NOT_UPTODATE_FILE);
}

/*
 * TODO: We should actually invalidate o->result, not src_index [1].
 * But since cache tree and untracked cache both are not copied to
 * o->result until unpacking is complete, we invalidate them on
 * src_index instead with the assumption that they will be copied to
 * dst_index at the end.
 *
 * [1] src_index->cache_tree is also used in unpack_callback() so if
 *     we invalidate o->result, we need to update it to use
 *     o->result.cache_tree as well.
 */
static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	if (!ce)
		return;
	cache_tree_invalidate_path(o->src_index, ce->name);
	untracked_cache_invalidate_path(o->src_index, ce->name, 1);
}

/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 */
static int verify_clean_submodule(const char *old_sha1,
				  const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	if (!submodule_from_ce(ce))
		return 0;

	return check_submodule_move_head(ce, old_sha1,
					 oid_to_hex(&ce->oid), o);
}

static int verify_clean_subdirectory(const struct cache_entry *ce,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;

	if (S_ISGITLINK(ce->ce_mode)) {
		struct object_id oid;
		int sub_head = resolve_gitlink_ref(ce->name, "HEAD", &oid);
		/*
		 * If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!sub_head && oideq(&oid, &ce->oid))
			return 0;
		return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
					      ce, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			invalidate_ce_path(ce, o);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/* Do not lose a locally present file that is not ignored. */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, o->src_index, pathbuf, namelen+1, NULL);
	dir_clear(&d);
	free(pathbuf);
	if (i)
		return add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);

	/* Do not lose startup_info->original_cwd */
	if (startup_info->original_cwd &&
	    !strcmp(startup_info->original_cwd, ce->name))
		return add_rejected_path(o, ERROR_CWD_IN_THE_WAY, ce->name);

	return cnt;
}

/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}

enum absent_checking_type {
	COMPLETELY_ABSENT,
	ABSENT_ANY_DIRECTORY
};

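/*
 * The absent_type argument below controls how strict the check is:
 * with COMPLETELY_ABSENT any untracked (and not ignored) path in the
 * way blocks the operation, while with ABSENT_ANY_DIRECTORY only a
 * directory in the way is a problem and plain files may be removed.
 */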
static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      enum absent_checking_type absent_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, o->src_index, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, o) < 0)
			return -1;
		return 0;
	}

	/* If we only care about directories, then we can remove */
	if (absent_type == ABSENT_ANY_DIRECTORY)
		return 0;

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   enum absent_checking_type absent_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || !o->update)
		return 0;

	if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED) {
		/* Avoid nuking startup_info->original_cwd... */
		if (startup_info->original_cwd &&
		    !strcmp(startup_info->original_cwd, ce->name))
			return add_rejected_path(o, ERROR_CWD_IN_THE_WAY,
						 ce->name);
		/* ...but nuke anything else. */
		return 0;
	}

	len = check_leading_path(ce->name, ce_namelen(ce), 0);
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type,
							 absent_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, absent_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o);
}

static int verify_absent_if_directory(const struct cache_entry *ce,
				      enum unpack_trees_error_types error_type,
				      struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, ABSENT_ANY_DIRECTORY, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	return verify_absent_1(ce, error_type, COMPLETELY_ABSENT, o);
}

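/*
 * Add the merge result 'ce' to o->result.  When the old entry is
 * unchanged its stat information is reused; otherwise the old entry
 * must be up to date in the working tree before it is replaced.
 */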
static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_cache_entry(ce, &o->result);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() be called again, this time with
		 *    correct CE_NEW_SKIP_WORKTREE
		 *
		 * verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			discard_cache_entry(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce) && file_exists(ce->name)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				discard_cache_entry(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce) && file_exists(ce->name)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		if (verify_absent_if_directory(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			discard_cache_entry(merge);
			return -1;
		}

		invalidate_ce_path(old, o);
	}

	if (do_add_entry(o, merge, update, CE_STAGEMASK) < 0)
		return -1;
	return 1;
}

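/*
 * Merge a sparse directory entry by descending into the corresponding
 * trees of each merge input and merging their contents entry by entry
 * via unpack_sparse_callback.
 */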
static int merged_sparse_dir(const struct cache_entry * const *src, int n,
			     struct unpack_trees_options *o)
{
	struct tree_desc t[MAX_UNPACK_TREES + 1];
	void * tree_bufs[MAX_UNPACK_TREES + 1];
	struct traverse_info info;
	int i, ret;

	/*
	 * Create the tree traversal information for traversing into *only* the
	 * sparse directory.
	 */
	setup_traverse_info(&info, src[0]->name);
	info.fn = unpack_sparse_callback;
	info.data = o;
	info.show_all_errors = o->show_all_errors;
	info.pathspec = o->pathspec;

	/* Get the tree descriptors of the sparse directory in each of the merging trees */
	for (i = 0; i < n; i++)
		tree_bufs[i] = fill_tree_descriptor(o->src_index->repo, &t[i],
						    src[i] && !is_null_oid(&src[i]->oid) ? &src[i]->oid : NULL);

	ret = traverse_trees(o->src_index, n, t, &info);

	for (i = 0; i < n; i++)
		free(tree_bufs[i]);

	return ret;
}

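/*
 * Mark 'ce' for removal from the result.  The removal is refused when
 * it would lose an untracked working tree file or, for a tracked and
 * non-conflicted entry, local modifications.
 */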
static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	} else if (verify_absent_if_directory(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o)) {
		return -1;
	}

	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	if (ce_stage(ce))
		invalidate_ce_path(ce, o);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif

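/*
 * Three-way merge.  stages[0] is the current index entry,
 * stages[o->head_idx] is HEAD and stages[o->head_idx + 1] is the remote
 * side being merged; the case numbers (#1, #2ALT, #16, ...) refer to
 * t/t1000-read-tree-m-3way.sh.
 */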
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head)) {
			if (S_ISSPARSEDIR(index->ce_mode))
				return merged_sparse_dir(stages, 4, o);
			else
				return reject_merge(index, o);
		}
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		if (S_ISSPARSEDIR(index->ce_mode))
			return merged_sparse_dir(stages, 4, o);
		else
			return reject_merge(index, o);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Handle "no merge" cases (see t/t1000-read-tree-m-3way.sh) */
	if (index) {
		/*
		 * If we've reached the "no merge" cases and we're merging
		 * a sparse directory, we may have an "edit/edit" conflict that
		 * can be resolved by individually merging directory contents.
		 */
		if (S_ISSPARSEDIR(index->ce_mode))
			return merged_sparse_dir(stages, 4, o);

		/*
		 * If we're not merging a sparse directory, ensure the index is
		 * up-to-date to avoid files getting overwritten with conflict
		 * resolution files.
		 */
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			 (!oldtree && newtree &&
			  same(current, newtree)) || /* 6 and 7 */
			 (oldtree && newtree &&
			  same(oldtree, newtree)) || /* 14 and 15 */
			 (oldtree && newtree &&
			  !same(oldtree, newtree) && /* 18 and 19 */
			  same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else if (current && !oldtree && newtree &&
			 S_ISSPARSEDIR(current->ce_mode) != S_ISSPARSEDIR(newtree->ce_mode) &&
			 ce_stage(current) == 0) {
			/*
			 * This case is a directory/file conflict across the sparse-index
			 * boundary. When we are changing from one path to another via
			 * 'git checkout', then we want to replace one entry with another
			 * via merged_entry(). If there are staged changes, then we should
			 * reject the merge instead.
			 */
			return merged_entry(newtree, current, o);
		} else if (S_ISSPARSEDIR(current->ce_mode)) {
			/*
			 * The sparse directories differ, but we don't know whether that's
			 * because of two different files in the directory being modified
			 * (can be trivially merged) or if there is a real file conflict.
			 * Merge the sparse directory by OID to compare file-by-file.
			 */
			return merged_sparse_dir(src, 3, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->quiet ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name, o->super_prefix),
			      super_prefixed(old->name, o->super_prefix));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old) &&
			!(old->ce_flags & CE_FSMONITOR_VALID)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		if (o->update && S_ISGITLINK(old->ce_mode) &&
		    should_update_submodules() && !verify_uptodate(old, o))
			update |= CE_UPDATE;
		add_entry(o, old, update, CE_STAGEMASK);
		return 0;
	}
	return merged_entry(a, old, o);
}

/*
 * Merge worktree and untracked entries in a stash entry.
 *
 * Ignore all index entries. Collapse remaining trees but make sure that they
 * don't have any conflicting files.
 */
int stash_worktree_untracked_merge(const struct cache_entry * const *src,
				   struct unpack_trees_options *o)
{
	const struct cache_entry *worktree = src[1];
	const struct cache_entry *untracked = src[2];

	if (o->merge_size != 2)
		BUG("invalid merge_size: %d", o->merge_size);

	if (worktree && untracked)
		return error(_("worktree and untracked commit have duplicate entries: %s"),
			     super_prefixed(worktree->name, o->super_prefix));

	return merged_entry(worktree ? worktree : untracked, NULL, o);
}