[git.git] / shallow.c

#include "cache.h"
#include "repository.h"
#include "tempfile.h"
#include "lockfile.h"
#include "object-store.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "remote.h"
#include "refs.h"
#include "oid-array.h"
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
#include "list-objects.h"
#include "commit-reach.h"

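/*
 * Override the path of the in-core shallow file for this repository.
 * This must happen before the shallow state is first read (see
 * is_repository_shallow() below); changing the path afterwards is a BUG.
 * When 'override' is zero, an already-set alternate path is kept.
 */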
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
        if (r->parsed_objects->is_shallow != -1)
                BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
        if (r->parsed_objects->alternate_shallow_file && !override)
                return;
        free(r->parsed_objects->alternate_shallow_file);
        r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}

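/*
 * Record 'oid' as a shallow boundary: register a graft with
 * nr_parent == -1 and drop any parent list already parsed for the
 * commit.
 */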
int register_shallow(struct repository *r, const struct object_id *oid)
{
        struct commit_graft *graft =
                xmalloc(sizeof(struct commit_graft));
        struct commit *commit = lookup_commit(the_repository, oid);

        oidcpy(&graft->oid, oid);
        graft->nr_parent = -1;
        if (commit && commit->object.parsed)
                commit->parents = NULL;
        return register_commit_graft(r, graft, 0);
}

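/*
 * Answer (and cache) whether 'r' is shallow. On the first call the
 * shallow file (or the alternate set above) is read, each listed
 * commit is registered as a shallow graft, and the file's stat data is
 * remembered so later changes can be detected.
 */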
int is_repository_shallow(struct repository *r)
{
        FILE *fp;
        char buf[1024];
        const char *path = r->parsed_objects->alternate_shallow_file;

        if (r->parsed_objects->is_shallow >= 0)
                return r->parsed_objects->is_shallow;

        if (!path)
                path = git_path_shallow(r);
        /*
         * fetch-pack sets '--shallow-file ""' as an indicator that no
         * shallow file should be used. We could just open it and it
         * will likely fail. But let's do an explicit check instead.
         */
        if (!*path || (fp = fopen(path, "r")) == NULL) {
                stat_validity_clear(r->parsed_objects->shallow_stat);
                r->parsed_objects->is_shallow = 0;
                return r->parsed_objects->is_shallow;
        }
        stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
        r->parsed_objects->is_shallow = 1;

        while (fgets(buf, sizeof(buf), fp)) {
                struct object_id oid;
                if (get_oid_hex(buf, &oid))
                        die("bad shallow line: %s", buf);
                register_shallow(r, &oid);
        }
        fclose(fp);
        return r->parsed_objects->is_shallow;
}

static void reset_repository_shallow(struct repository *r)
{
        r->parsed_objects->is_shallow = -1;
        stat_validity_clear(r->parsed_objects->shallow_stat);
}

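/*
 * Finish (commit_shallow_file) or abort (rollback_shallow_file) a
 * pending update to the shallow file, and invalidate the cached
 * in-core shallow state so the next query re-reads it.
 */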
int commit_shallow_file(struct repository *r, struct lock_file *lk)
{
        int res = commit_lock_file(lk);
        reset_repository_shallow(r);
        return res;
}

void rollback_shallow_file(struct repository *r, struct lock_file *lk)
{
        rollback_lock_file(lk);
        reset_repository_shallow(r);
}

/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);
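
/*
 * Walk from the given heads, tracking the shortest distance seen so far
 * to each commit in the commit_depth slab. A commit becomes a shallow
 * boundary (flagged with shallow_flag and returned in the result list)
 * when it sits at the requested depth, or when it is already a
 * parentless graft in a shallow repository; commits nearer than that
 * are flagged with not_shallow_flag.
 */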
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
                                        int shallow_flag, int not_shallow_flag)
{
        int i = 0, cur_depth = 0;
        struct commit_list *result = NULL;
        struct object_array stack = OBJECT_ARRAY_INIT;
        struct commit *commit = NULL;
        struct commit_graft *graft;
        struct commit_depth depths;

        init_commit_depth(&depths);
        while (commit || i < heads->nr || stack.nr) {
                struct commit_list *p;
                if (!commit) {
                        if (i < heads->nr) {
                                int **depth_slot;
                                commit = (struct commit *)
                                        deref_tag(the_repository,
                                                  heads->objects[i++].item,
                                                  NULL, 0);
                                if (!commit || commit->object.type != OBJ_COMMIT) {
                                        commit = NULL;
                                        continue;
                                }
                                depth_slot = commit_depth_at(&depths, commit);
                                if (!*depth_slot)
                                        *depth_slot = xmalloc(sizeof(int));
                                **depth_slot = 0;
                                cur_depth = 0;
                        } else {
                                commit = (struct commit *)
                                        object_array_pop(&stack);
                                cur_depth = **commit_depth_at(&depths, commit);
                        }
                }
                parse_commit_or_die(commit);
                cur_depth++;
                if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
                    (is_repository_shallow(the_repository) && !commit->parents &&
                     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
                     graft->nr_parent < 0)) {
                        commit_list_insert(commit, &result);
                        commit->object.flags |= shallow_flag;
                        commit = NULL;
                        continue;
                }
                commit->object.flags |= not_shallow_flag;
                for (p = commit->parents, commit = NULL; p; p = p->next) {
                        int **depth_slot = commit_depth_at(&depths, p->item);
                        if (!*depth_slot) {
                                *depth_slot = xmalloc(sizeof(int));
                                **depth_slot = cur_depth;
                        } else {
                                if (cur_depth >= **depth_slot)
                                        continue;
                                **depth_slot = cur_depth;
                        }
                        if (p->next)
                                add_object_array(&p->item->object,
                                                 NULL, &stack);
                        else {
                                commit = p->item;
                                cur_depth = **commit_depth_at(&depths, commit);
                        }
                }
        }
        for (i = 0; i < depths.slab_count; i++) {
                int j;

                if (!depths.slab[i])
                        continue;
                for (j = 0; j < depths.slab_size; j++)
                        free(depths.slab[i][j]);
        }
        clear_commit_depth(&depths);

        return result;
}

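/* traverse_commit_list() callback: prepend each commit to the list passed via 'data' */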
static void show_commit(struct commit *commit, void *data)
{
        commit_list_insert(commit, data);
}

/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * is also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
                                                    int shallow_flag,
                                                    int not_shallow_flag)
{
        struct commit_list *result = NULL, *p;
        struct commit_list *not_shallow_list = NULL;
        struct rev_info revs;
        int both_flags = shallow_flag | not_shallow_flag;

        /*
         * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
         * set at this point. But better be safe than sorry.
         */
        clear_object_flags(both_flags);

        is_repository_shallow(the_repository); /* make sure shallows are read */

        repo_init_revisions(the_repository, &revs, NULL);
        save_commit_buffer = 0;
        setup_revisions(ac, av, &revs, NULL);

        if (prepare_revision_walk(&revs))
                die("revision walk setup failed");
        traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

        if (!not_shallow_list)
                die("no commits selected for shallow requests");

        /* Mark all reachable commits as NOT_SHALLOW */
        for (p = not_shallow_list; p; p = p->next)
                p->item->object.flags |= not_shallow_flag;

        /*
         * mark border commits SHALLOW + NOT_SHALLOW.
         * We cannot clear NOT_SHALLOW right now. Imagine border
         * commit A is processed first, then commit B, whose parent is
         * A, later. If NOT_SHALLOW on A is cleared at step 1, B
         * itself is considered border at step 2, which is incorrect.
         */
        for (p = not_shallow_list; p; p = p->next) {
                struct commit *c = p->item;
                struct commit_list *parent;

                if (parse_commit(c))
                        die("unable to parse commit %s",
                            oid_to_hex(&c->object.oid));

                for (parent = c->parents; parent; parent = parent->next)
                        if (!(parent->item->object.flags & not_shallow_flag)) {
                                c->object.flags |= shallow_flag;
                                commit_list_insert(c, &result);
                                break;
                        }
        }
        free_commit_list(not_shallow_list);

        /*
         * Now we can clean up NOT_SHALLOW on border commits. Having
         * both flags set can confuse the caller.
         */
        for (p = result; p; p = p->next) {
                struct object *o = &p->item->object;
                if ((o->flags & both_flags) == both_flags)
                        o->flags &= ~not_shallow_flag;
        }
        return result;
}

static void check_shallow_file_for_update(struct repository *r)
{
        if (r->parsed_objects->is_shallow == -1)
                BUG("shallow must be initialized by now");

        if (!stat_validity_check(r->parsed_objects->shallow_stat,
                                 git_path_shallow(r)))
                die("shallow file has changed since we read it");
}

#define SEEN_ONLY 1
#define VERBOSE 2
#define QUICK 4

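/*
 * Flags for write_shallow_commits_1()/write_one_shallow(): QUICK drops
 * grafts whose commits no longer exist in the object store, SEEN_ONLY
 * drops grafts whose commits are not flagged SEEN, and VERBOSE reports
 * entries dropped by the SEEN_ONLY check. When both QUICK and
 * SEEN_ONLY are set, QUICK takes precedence.
 */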
struct write_shallow_data {
        struct strbuf *out;
        int use_pack_protocol;
        int count;
        unsigned flags;
};

static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
        struct write_shallow_data *data = cb_data;
        const char *hex = oid_to_hex(&graft->oid);
        if (graft->nr_parent != -1)
                return 0;
        if (data->flags & QUICK) {
                if (!has_object_file(&graft->oid))
                        return 0;
        } else if (data->flags & SEEN_ONLY) {
                struct commit *c = lookup_commit(the_repository, &graft->oid);
                if (!c || !(c->object.flags & SEEN)) {
                        if (data->flags & VERBOSE)
                                /* 'c' may be NULL here; report the graft's oid */
                                printf("Removing %s from .git/shallow\n",
                                       hex);
                        return 0;
                }
        }
        data->count++;
        if (data->use_pack_protocol)
                packet_buf_write(data->out, "shallow %s", hex);
        else {
                strbuf_addstr(data->out, hex);
                strbuf_addch(data->out, '\n');
        }
        return 0;
}

static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
                                   const struct oid_array *extra,
                                   unsigned flags)
{
        struct write_shallow_data data;
        int i;
        data.out = out;
        data.use_pack_protocol = use_pack_protocol;
        data.count = 0;
        data.flags = flags;
        for_each_commit_graft(write_one_shallow, &data);
        if (!extra)
                return data.count;
        for (i = 0; i < extra->nr; i++) {
                strbuf_addstr(out, oid_to_hex(extra->oid + i));
                strbuf_addch(out, '\n');
                data.count++;
        }
        return data.count;
}

int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
                          const struct oid_array *extra)
{
        return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}

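/*
 * Write the current shallow list (plus any 'extra' oids) to a
 * temporary file and return its path, or return "" when there is
 * nothing to write so that is_repository_shallow() treats the result
 * as "no shallow file".
 */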
const char *setup_temporary_shallow(const struct oid_array *extra)
{
        struct tempfile *temp;
        struct strbuf sb = STRBUF_INIT;

        if (write_shallow_commits(&sb, 0, extra)) {
                temp = xmks_tempfile(git_path("shallow_XXXXXX"));

                if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
                    close_tempfile_gently(temp) < 0)
                        die_errno("failed to write to %s",
                                  get_tempfile_path(temp));
                strbuf_release(&sb);
                return get_tempfile_path(temp);
        }
        /*
         * is_repository_shallow() sees empty string as "no shallow
         * file".
         */
        return "";
}

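/*
 * Like setup_temporary_shallow(), but the list is written while
 * holding a lock on $GIT_DIR/shallow; the lock file path (or "") is
 * reported back through *alternate_shallow_file.
 */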
void setup_alternate_shallow(struct lock_file *shallow_lock,
                             const char **alternate_shallow_file,
                             const struct oid_array *extra)
{
        struct strbuf sb = STRBUF_INIT;
        int fd;

        fd = hold_lock_file_for_update(shallow_lock,
                                       git_path_shallow(the_repository),
                                       LOCK_DIE_ON_ERROR);
        check_shallow_file_for_update(the_repository);
        if (write_shallow_commits(&sb, 0, extra)) {
                if (write_in_full(fd, sb.buf, sb.len) < 0)
                        die_errno("failed to write to %s",
                                  get_lock_file_path(shallow_lock));
                *alternate_shallow_file = get_lock_file_path(shallow_lock);
        } else
                /*
                 * is_repository_shallow() sees empty string as "no
                 * shallow file".
                 */
                *alternate_shallow_file = "";
        strbuf_release(&sb);
}

static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
        int fd = *(int *)cb;
        if (graft->nr_parent == -1)
                packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
        return 0;
}

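/* Send one "shallow <oid>" pkt-line on fd for every shallow graft. */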
void advertise_shallow_grafts(int fd)
{
        if (!is_repository_shallow(the_repository))
                return;
        for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}

/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
        struct lock_file shallow_lock = LOCK_INIT;
        struct strbuf sb = STRBUF_INIT;
        unsigned flags = SEEN_ONLY;
        int fd;

        if (options & PRUNE_QUICK)
                flags |= QUICK;

        if (options & PRUNE_SHOW_ONLY) {
                flags |= VERBOSE;
                write_shallow_commits_1(&sb, 0, NULL, flags);
                strbuf_release(&sb);
                return;
        }
        fd = hold_lock_file_for_update(&shallow_lock,
                                       git_path_shallow(the_repository),
                                       LOCK_DIE_ON_ERROR);
        check_shallow_file_for_update(the_repository);
        if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
                if (write_in_full(fd, sb.buf, sb.len) < 0)
                        die_errno("failed to write to %s",
                                  get_lock_file_path(&shallow_lock));
                commit_shallow_file(the_repository, &shallow_lock);
        } else {
                unlink(git_path_shallow(the_repository));
                rollback_shallow_file(the_repository, &shallow_lock);
        }
        strbuf_release(&sb);
}

struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
        int i;
        trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
        memset(info, 0, sizeof(*info));
        info->shallow = sa;
        if (!sa)
                return;
        ALLOC_ARRAY(info->ours, sa->nr);
        ALLOC_ARRAY(info->theirs, sa->nr);
        for (i = 0; i < sa->nr; i++) {
                if (has_object_file(sa->oid + i)) {
                        struct commit_graft *graft;
                        graft = lookup_commit_graft(the_repository,
                                                    &sa->oid[i]);
                        if (graft && graft->nr_parent < 0)
                                continue;
                        info->ours[info->nr_ours++] = i;
                } else
                        info->theirs[info->nr_theirs++] = i;
        }
}

void clear_shallow_info(struct shallow_info *info)
{
        free(info->ours);
        free(info->theirs);
}

/* Step 4, remove non-existent ones in "theirs" after getting the pack */

void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
        struct object_id *oid = info->shallow->oid;
        int i, dst;
        trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
        for (i = dst = 0; i < info->nr_theirs; i++) {
                if (i != dst)
                        info->theirs[dst] = info->theirs[i];
                if (has_object_file(oid + info->theirs[i]))
                        dst++;
        }
        info->nr_theirs = dst;
}

define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

struct paint_info {
        struct ref_bitmap ref_bitmap;
        unsigned nr_bits;
        char **pools;
        char *free, *end;
        unsigned pool_count;
};

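/*
 * Bump allocator for ref bitmaps: each bitmap holds one bit per ref,
 * rounded up to whole uint32_t words (e.g. 70 refs need
 * DIV_ROUND_UP(70, 32) = 3 words), and bitmaps are carved out of
 * POOL_SIZE pools to avoid a separate allocation per commit.
 */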
static uint32_t *paint_alloc(struct paint_info *info)
{
        unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
        unsigned size = nr * sizeof(uint32_t);
        void *p;
        if (!info->pool_count || size > info->end - info->free) {
                if (size > POOL_SIZE)
                        BUG("pool size too small for %d in paint_alloc()",
                            size);
                info->pool_count++;
                REALLOC_ARRAY(info->pools, info->pool_count);
                info->free = xmalloc(POOL_SIZE);
                info->pools[info->pool_count - 1] = info->free;
                info->end = info->free + POOL_SIZE;
        }
        p = info->free;
        info->free += size;
        return p;
}

/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
                       unsigned int id)
{
        unsigned int i, nr;
        struct commit_list *head = NULL;
        int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
        size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
        struct commit *c = lookup_commit_reference_gently(the_repository, oid,
                                                          1);
        uint32_t *tmp; /* to be freed before return */
        uint32_t *bitmap;

        if (!c)
                return;

        tmp = xmalloc(bitmap_size);
        bitmap = paint_alloc(info);
        memset(bitmap, 0, bitmap_size);
        bitmap[id / 32] |= (1U << (id % 32));
        commit_list_insert(c, &head);
        while (head) {
                struct commit_list *p;
                struct commit *c = pop_commit(&head);
                uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

                /* XXX check "UNINTERESTING" from pack bitmaps if available */
                if (c->object.flags & (SEEN | UNINTERESTING))
                        continue;
                else
                        c->object.flags |= SEEN;

                if (*refs == NULL)
                        *refs = bitmap;
                else {
                        memcpy(tmp, *refs, bitmap_size);
                        for (i = 0; i < bitmap_nr; i++)
                                tmp[i] |= bitmap[i];
                        if (memcmp(tmp, *refs, bitmap_size)) {
                                *refs = paint_alloc(info);
                                memcpy(*refs, tmp, bitmap_size);
                        }
                }

                if (c->object.flags & BOTTOM)
                        continue;

                if (parse_commit(c))
                        die("unable to parse commit %s",
                            oid_to_hex(&c->object.oid));

                for (p = c->parents; p; p = p->next) {
                        if (p->item->object.flags & SEEN)
                                continue;
                        commit_list_insert(p->item, &head);
                }
        }

        nr = get_max_object_index();
        for (i = 0; i < nr; i++) {
                struct object *o = get_indexed_object(i);
                if (o && o->type == OBJ_COMMIT)
                        o->flags &= ~SEEN;
        }

        free(tmp);
}

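/*
 * for_each_ref() callback: mark existing ref tips and their parsed
 * ancestry UNINTERESTING, so paint_down() stops as soon as it reaches
 * history the repository already has.
 */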
static int mark_uninteresting(const char *refname, const struct object_id *oid,
                              int flags, void *cb_data)
{
        struct commit *commit = lookup_commit_reference_gently(the_repository,
                                                               oid, 1);
        if (!commit)
                return 0;
        commit->object.flags |= UNINTERESTING;
        mark_parents_uninteresting(commit);
        return 0;
}

static void post_assign_shallow(struct shallow_info *info,
                                struct ref_bitmap *ref_bitmap,
                                int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
                                    uint32_t **used, int *ref_status)
{
        struct object_id *oid = info->shallow->oid;
        struct oid_array *ref = info->ref;
        unsigned int i, nr;
        int *shallow, nr_shallow = 0;
        struct paint_info pi;

        trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
        ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
        for (i = 0; i < info->nr_ours; i++)
                shallow[nr_shallow++] = info->ours[i];
        for (i = 0; i < info->nr_theirs; i++)
                shallow[nr_shallow++] = info->theirs[i];

        /*
         * Prepare the commit graph to track what refs can reach what
         * (new) shallow commits.
         */
        nr = get_max_object_index();
        for (i = 0; i < nr; i++) {
                struct object *o = get_indexed_object(i);
                if (!o || o->type != OBJ_COMMIT)
                        continue;

                o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
        }

        memset(&pi, 0, sizeof(pi));
        init_ref_bitmap(&pi.ref_bitmap);
        pi.nr_bits = ref->nr;

        /*
         * "--not --all" to cut short the traversal if new refs
         * connect to old refs. If not (e.g. force ref updates) it'll
         * have to go down to the current shallow commits.
         */
        head_ref(mark_uninteresting, NULL);
        for_each_ref(mark_uninteresting, NULL);

        /* Mark potential bottoms so we won't go out of bound */
        for (i = 0; i < nr_shallow; i++) {
                struct commit *c = lookup_commit(the_repository,
                                                 &oid[shallow[i]]);
                c->object.flags |= BOTTOM;
        }

        for (i = 0; i < ref->nr; i++)
                paint_down(&pi, ref->oid + i, i);

        if (used) {
                int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
                memset(used, 0, sizeof(*used) * info->shallow->nr);
                for (i = 0; i < nr_shallow; i++) {
                        const struct commit *c = lookup_commit(the_repository,
                                                               &oid[shallow[i]]);
                        uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
                        if (*map)
                                used[shallow[i]] = xmemdupz(*map, bitmap_size);
                }
                /*
                 * unreachable shallow commits are not removed from
                 * "ours" and "theirs". The user is supposed to run
                 * step 7 on every ref separately and not trust "ours"
                 * and "theirs" any more.
                 */
        } else
                post_assign_shallow(info, &pi.ref_bitmap, ref_status);

        clear_ref_bitmap(&pi.ref_bitmap);
        for (i = 0; i < pi.pool_count; i++)
                free(pi.pools[i]);
        free(pi.pools);
        free(shallow);
}

struct commit_array {
        struct commit **commits;
        int nr, alloc;
};

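/*
 * for_each_ref() callback: collect the commit each ref resolves to
 * (peeling tags, skipping refs that do not point at a commit) into a
 * commit_array.
 */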
static int add_ref(const char *refname, const struct object_id *oid,
                   int flags, void *cb_data)
{
        struct commit_array *ca = cb_data;
        ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
        ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
                                                             oid, 1);
        if (ca->commits[ca->nr])
                ca->nr++;
        return 0;
}

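/* For every bit set in 'bitmap', bump the matching entry in ref_status. */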
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
        unsigned int i;
        if (!ref_status)
                return;
        for (i = 0; i < nr; i++)
                if (bitmap[i / 32] & (1U << (i % 32)))
                        ref_status[i]++;
}

/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
                                struct ref_bitmap *ref_bitmap,
                                int *ref_status)
{
        struct object_id *oid = info->shallow->oid;
        struct commit *c;
        uint32_t **bitmap;
        int dst, i, j;
        int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
        struct commit_array ca;

        trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
        if (ref_status)
                memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

        /* Remove unreachable shallow commits from "theirs" */
        for (i = dst = 0; i < info->nr_theirs; i++) {
                if (i != dst)
                        info->theirs[dst] = info->theirs[i];
                c = lookup_commit(the_repository, &oid[info->theirs[i]]);
                bitmap = ref_bitmap_at(ref_bitmap, c);
                if (!*bitmap)
                        continue;
                for (j = 0; j < bitmap_nr; j++)
                        if (bitmap[0][j]) {
                                update_refstatus(ref_status, info->ref->nr, *bitmap);
                                dst++;
                                break;
                        }
        }
        info->nr_theirs = dst;

        memset(&ca, 0, sizeof(ca));
        head_ref(add_ref, &ca);
        for_each_ref(add_ref, &ca);

        /* Remove unreachable shallow commits from "ours" */
        for (i = dst = 0; i < info->nr_ours; i++) {
                if (i != dst)
                        info->ours[dst] = info->ours[i];
                c = lookup_commit(the_repository, &oid[info->ours[i]]);
                bitmap = ref_bitmap_at(ref_bitmap, c);
                if (!*bitmap)
                        continue;
                for (j = 0; j < bitmap_nr; j++)
                        if (bitmap[0][j] &&
                            /* Step 7, reachability test at commit level */
                            !in_merge_bases_many(c, ca.nr, ca.commits)) {
                                update_refstatus(ref_status, info->ref->nr, *bitmap);
                                dst++;
                                break;
                        }
        }
        info->nr_ours = dst;

        free(ca.commits);
}

/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
        if (si->need_reachability_test[c]) {
                struct commit *commit = lookup_commit(the_repository,
                                                      &si->shallow->oid[c]);

                if (!si->commits) {
                        struct commit_array ca;

                        memset(&ca, 0, sizeof(ca));
                        head_ref(add_ref, &ca);
                        for_each_ref(add_ref, &ca);
                        si->commits = ca.commits;
                        si->nr_commits = ca.nr;
                }

                si->reachable[c] = in_merge_bases_many(commit,
                                                       si->nr_commits,
                                                       si->commits);
                si->need_reachability_test[c] = 0;
        }
        return si->reachable[c];
}