Merge branch 'jc/drop-gen-hdrs'
[git/raj.git] / shallow.c
blob7fd04afed19af7de9dc1438b328567d0b7f2b2b4
1 #include "cache.h"
2 #include "repository.h"
3 #include "tempfile.h"
4 #include "lockfile.h"
5 #include "object-store.h"
6 #include "commit.h"
7 #include "tag.h"
8 #include "pkt-line.h"
9 #include "remote.h"
10 #include "refs.h"
11 #include "sha1-array.h"
12 #include "diff.h"
13 #include "revision.h"
14 #include "commit-slab.h"
15 #include "list-objects.h"
16 #include "commit-reach.h"
/*
 * Select a non-default path for this repository's shallow file.
 * Must be called before is_repository_shallow() caches its answer
 * (the BUG below enforces that).  With override == 0, an already-set
 * alternate path wins and this call is a no-op.
 */
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
	if (r->parsed_objects->is_shallow != -1)
		BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
	if (r->parsed_objects->alternate_shallow_file && !override)
		return;
	free(r->parsed_objects->alternate_shallow_file);
	/* path may be NULL, which resets back to the default shallow file */
	r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}
/*
 * Record "oid" as a shallow boundary commit by registering a graft
 * with nr_parent == -1 (the shallow marker).  If the commit object was
 * already parsed, its in-core parent list is cleared so the history
 * cut takes effect immediately.
 */
int register_shallow(struct repository *r, const struct object_id *oid)
{
	struct commit_graft *graft =
		xmalloc(sizeof(struct commit_graft));
	struct commit *commit = lookup_commit(the_repository, oid);

	oidcpy(&graft->oid, oid);
	graft->nr_parent = -1;
	if (commit && commit->object.parsed)
		commit->parents = NULL;
	/* ownership of graft passes to the graft registry */
	return register_commit_graft(r, graft, 0);
}
/*
 * Return 1 if the repository is shallow, 0 otherwise.  On the first
 * call this reads the shallow file, registers every listed commit as
 * a shallow graft, and caches the result in r->parsed_objects.
 */
int is_repository_shallow(struct repository *r)
{
	/*
	 * NEEDSWORK: This function updates
	 * r->parsed_objects->{is_shallow,shallow_stat} as a side effect but
	 * there is no corresponding function to clear them when the shallow
	 * file is updated.
	 */
	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	/* cached answer from a previous call? */
	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow;

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	/* remember the file's stat data so later changes can be detected */
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	/* one object id per line; register each as a parentless graft */
	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}
/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);

/*
 * Walk down from the commits in "heads" and return the list of
 * commits that become the new shallow boundary when history is cut
 * at "depth".  Boundary commits get shallow_flag; everything walked
 * above them gets not_shallow_flag.  Per-commit depths are tracked in
 * a commit slab so a commit reached again via a shallower path is
 * re-expanded.
 */
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
		int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	/* iterative DFS: "commit" is the current node, extra work on "stack" */
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				int **depth_slot;
				/* start a new walk from the next head (peeling tags) */
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				/* resume a deferred sibling branch */
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		/*
		 * Boundary if we hit the requested depth, or if we ran into
		 * an existing shallow graft (parentless by definition).
		 */
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		/* descend into the last parent; stash the others on the stack */
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);
			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				/* already reached at an equal/shallower depth: skip */
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	/* free the per-commit depth ints allocated above */
	for (i = 0; i < depths.slab_count; i++) {
		int j;

		if (!depths.slab[i])
			continue;
		for (j = 0; j < depths.slab_size; j++)
			free(depths.slab[i][j]);
	}
	clear_commit_depth(&depths);

	return result;
}
/*
 * traverse_commit_list() callback: prepend each visited commit onto
 * the commit_list whose address was passed as "data".
 */
static void show_commit(struct commit *commit, void *data)
{
	struct commit_list **list = data;

	commit_list_insert(commit, list);
}
/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * are also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0; /* we only need topology, not commit bodies */
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		/* a commit with any unreached parent is a border commit */
		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}
/*
 * Die if the on-disk shallow file changed since is_repository_shallow()
 * read it; callers invoke this after taking the shallow-file lock and
 * before rewriting the file.
 */
static void check_shallow_file_for_update(struct repository *r)
{
	if (r->parsed_objects->is_shallow == -1)
		BUG("shallow must be initialized by now");

	if (!stat_validity_check(r->parsed_objects->shallow_stat,
				 git_path_shallow(r)))
		die("shallow file has changed since we read it");
}
/* flags for write_shallow_commits_1() / write_one_shallow() */
#define SEEN_ONLY 1	/* keep only commits whose object carries SEEN */
#define VERBOSE 2	/* report entries being dropped (with SEEN_ONLY) */
#define QUICK 4		/* keep only entries whose object still exists */

/* callback state threaded through for_each_commit_graft() */
struct write_shallow_data {
	struct strbuf *out;	/* accumulated output lines */
	int use_pack_protocol;	/* nonzero: pkt-line "shallow <oid>" framing */
	int count;		/* number of entries emitted so far */
	unsigned flags;		/* SEEN_ONLY / VERBOSE / QUICK */
};
266 static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
268 struct write_shallow_data *data = cb_data;
269 const char *hex = oid_to_hex(&graft->oid);
270 if (graft->nr_parent != -1)
271 return 0;
272 if (data->flags & QUICK) {
273 if (!has_object_file(&graft->oid))
274 return 0;
275 } else if (data->flags & SEEN_ONLY) {
276 struct commit *c = lookup_commit(the_repository, &graft->oid);
277 if (!c || !(c->object.flags & SEEN)) {
278 if (data->flags & VERBOSE)
279 printf("Removing %s from .git/shallow\n",
280 oid_to_hex(&c->object.oid));
281 return 0;
284 data->count++;
285 if (data->use_pack_protocol)
286 packet_buf_write(data->out, "shallow %s", hex);
287 else {
288 strbuf_addstr(data->out, hex);
289 strbuf_addch(data->out, '\n');
291 return 0;
294 static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
295 const struct oid_array *extra,
296 unsigned flags)
298 struct write_shallow_data data;
299 int i;
300 data.out = out;
301 data.use_pack_protocol = use_pack_protocol;
302 data.count = 0;
303 data.flags = flags;
304 for_each_commit_graft(write_one_shallow, &data);
305 if (!extra)
306 return data.count;
307 for (i = 0; i < extra->nr; i++) {
308 strbuf_addstr(out, oid_to_hex(extra->oid + i));
309 strbuf_addch(out, '\n');
310 data.count++;
312 return data.count;
/* Convenience wrapper: write_shallow_commits_1() with no filter flags. */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}
/*
 * Write the current shallow list (plus "extra") into a temporary file
 * and return its path, or "" when there is nothing shallow to record.
 * The returned path points at tempfile-owned storage.
 */
const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		temp = xmks_tempfile(git_path("shallow_XXXXXX"));

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}
/*
 * Lock the shallow file and write the current shallow list (plus
 * "extra") into the lockfile.  On return *alternate_shallow_file is
 * the lockfile path, or "" when there is nothing shallow.  The caller
 * is responsible for committing or rolling back shallow_lock.
 */
void setup_alternate_shallow(struct lock_file *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	/* die if someone rewrote the shallow file behind our back */
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(shallow_lock));
		*alternate_shallow_file = get_lock_file_path(shallow_lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}
368 static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
370 int fd = *(int *)cb;
371 if (graft->nr_parent == -1)
372 packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
373 return 0;
376 void advertise_shallow_grafts(int fd)
378 if (!is_repository_shallow(the_repository))
379 return;
380 for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
	struct lock_file shallow_lock = LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	if (options & PRUNE_SHOW_ONLY) {
		/* dry run: print what would be removed, touch nothing */
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	/* refuse to clobber a shallow file someone else rewrote */
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock));
		commit_lock_file(&shallow_lock);
	} else {
		/* no entries survived: remove the shallow file entirely */
		unlink(git_path_shallow(the_repository));
		rollback_lock_file(&shallow_lock);
	}
	strbuf_release(&sb);
}
/* enabled via GIT_TRACE_SHALLOW; used by the step functions below */
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	int i;
	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	/* ours/theirs hold indices into sa, at most sa->nr each */
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (i = 0; i < sa->nr; i++) {
		if (has_object_file(sa->oid + i)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(the_repository,
						    &sa->oid[i]);
			/* already recorded in .git/shallow: nothing to do */
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}
450 void clear_shallow_info(struct shallow_info *info)
452 free(info->ours);
453 free(info->theirs);
/* Step 4, remove non-existent ones in "theirs" after getting the pack */

void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	int i, dst;
	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	/* compact theirs[] in place, keeping entries whose object exists */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (has_object_file(oid + info->theirs[i]))
			dst++;
	}
	info->nr_theirs = dst;
}
/* per-commit bitmap: bit i set means ref i reaches this commit */
define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

/* state for paint_down(): bitmaps are carved out of bump-allocated pools */
struct paint_info {
	struct ref_bitmap ref_bitmap;	/* commit -> reaching-refs bitmap */
	unsigned nr_bits;		/* number of refs == bits per bitmap */
	char **pools;			/* backing allocations, freed wholesale */
	char *free, *end;		/* unused window of the current pool */
	unsigned pool_count;
};
484 static uint32_t *paint_alloc(struct paint_info *info)
486 unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
487 unsigned size = nr * sizeof(uint32_t);
488 void *p;
489 if (!info->pool_count || size > info->end - info->free) {
490 if (size > POOL_SIZE)
491 BUG("pool size too small for %d in paint_alloc()",
492 size);
493 info->pool_count++;
494 REALLOC_ARRAY(info->pools, info->pool_count);
495 info->free = xmalloc(POOL_SIZE);
496 info->pools[info->pool_count - 1] = info->free;
497 info->end = info->free + POOL_SIZE;
499 p = info->free;
500 info->free += size;
501 return p;
/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	/* bitmap with only this ref's bit set; shared by walked commits */
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			/* merge this ref's bit into the existing bitmap */
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			/* allocate a new bitmap only if the merge changed it */
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	/* clear SEEN on all commits so the next paint_down() starts fresh */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}
576 static int mark_uninteresting(const char *refname, const struct object_id *oid,
577 int flags, void *cb_data)
579 struct commit *commit = lookup_commit_reference_gently(the_repository,
580 oid, 1);
581 if (!commit)
582 return 0;
583 commit->object.flags |= UNINTERESTING;
584 mark_parents_uninteresting(commit);
585 return 0;
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);

/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	/* collect indices of all candidate shallow commits, ours then theirs */
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	/* paint each ref's bit down onto every commit it reaches */
	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	/* the bitmaps live in pi's pools; free them wholesale */
	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}
/* a growable array of commits that refs point at */
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};

/* for_each_ref() callback: append the ref's tip commit to a commit_array */
static int add_ref(const char *refname, const struct object_id *oid,
		   int flags, void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
							     oid, 1);
	/* only count refs that actually peel to a commit */
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}
/*
 * For every ref index i (0 <= i < nr) whose bit is set in "bitmap",
 * increment ref_status[i].  ref_status may be NULL, in which case
 * this is a no-op.
 */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	int i;

	if (!ref_status)
		return;
	/*
	 * Loop counter is int to match "nr"; the original unsigned
	 * counter compared against int invited sign-conversion bugs
	 * (a negative nr would wrap to a huge bound).
	 */
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}
/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue; /* no ref reaches it: drop the entry */
		/* any nonzero word means some ref needs this commit */
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	/* gather all ref tip commits for the merge-base test below */
	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}
/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(the_repository,
						      &si->shallow->oid[c]);

		/* lazily collect ref tips once, then cache them in si */
		if (!si->commits) {
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			head_ref(add_ref, &ca);
			for_each_ref(add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = in_merge_bases_many(commit,
						       si->nr_commits,
						       si->commits);
		si->need_reachability_test[c] = 0; /* memoize the answer */
	}
	return si->reachable[c];
}