2 #include "repository.h"
5 #include "object-store.h"
11 #include "oid-array.h"
14 #include "commit-slab.h"
15 #include "list-objects.h"
16 #include "commit-reach.h"
19 void set_alternate_shallow_file(struct repository
*r
, const char *path
, int override
)
21 if (r
->parsed_objects
->is_shallow
!= -1)
22 BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
23 if (r
->parsed_objects
->alternate_shallow_file
&& !override
)
25 free(r
->parsed_objects
->alternate_shallow_file
);
26 r
->parsed_objects
->alternate_shallow_file
= xstrdup_or_null(path
);
29 int register_shallow(struct repository
*r
, const struct object_id
*oid
)
31 struct commit_graft
*graft
=
32 xmalloc(sizeof(struct commit_graft
));
33 struct commit
*commit
= lookup_commit(the_repository
, oid
);
35 oidcpy(&graft
->oid
, oid
);
36 graft
->nr_parent
= -1;
37 if (commit
&& commit
->object
.parsed
)
38 commit
->parents
= NULL
;
39 return register_commit_graft(r
, graft
, 0);
42 int unregister_shallow(const struct object_id
*oid
)
44 int pos
= commit_graft_pos(the_repository
, oid
);
47 if (pos
+ 1 < the_repository
->parsed_objects
->grafts_nr
)
48 MOVE_ARRAY(the_repository
->parsed_objects
->grafts
+ pos
,
49 the_repository
->parsed_objects
->grafts
+ pos
+ 1,
50 the_repository
->parsed_objects
->grafts_nr
- pos
- 1);
51 the_repository
->parsed_objects
->grafts_nr
--;
55 int is_repository_shallow(struct repository
*r
)
59 const char *path
= r
->parsed_objects
->alternate_shallow_file
;
61 if (r
->parsed_objects
->is_shallow
>= 0)
62 return r
->parsed_objects
->is_shallow
;
65 path
= git_path_shallow(r
);
67 * fetch-pack sets '--shallow-file ""' as an indicator that no
68 * shallow file should be used. We could just open it and it
69 * will likely fail. But let's do an explicit check instead.
71 if (!*path
|| (fp
= fopen(path
, "r")) == NULL
) {
72 stat_validity_clear(r
->parsed_objects
->shallow_stat
);
73 r
->parsed_objects
->is_shallow
= 0;
74 return r
->parsed_objects
->is_shallow
;
76 stat_validity_update(r
->parsed_objects
->shallow_stat
, fileno(fp
));
77 r
->parsed_objects
->is_shallow
= 1;
79 while (fgets(buf
, sizeof(buf
), fp
)) {
81 if (get_oid_hex(buf
, &oid
))
82 die("bad shallow line: %s", buf
);
83 register_shallow(r
, &oid
);
86 return r
->parsed_objects
->is_shallow
;
89 static void reset_repository_shallow(struct repository
*r
)
91 r
->parsed_objects
->is_shallow
= -1;
92 stat_validity_clear(r
->parsed_objects
->shallow_stat
);
93 reset_commit_grafts(r
);
96 int commit_shallow_file(struct repository
*r
, struct shallow_lock
*lk
)
98 int res
= commit_lock_file(&lk
->lock
);
99 reset_repository_shallow(r
);
103 void rollback_shallow_file(struct repository
*r
, struct shallow_lock
*lk
)
105 rollback_lock_file(&lk
->lock
);
106 reset_repository_shallow(r
);
110 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
111 * supports a "valid" flag.
113 define_commit_slab(commit_depth
, int *);
114 static void free_depth_in_slab(int **ptr
)
118 struct commit_list
*get_shallow_commits(struct object_array
*heads
, int depth
,
119 int shallow_flag
, int not_shallow_flag
)
121 int i
= 0, cur_depth
= 0;
122 struct commit_list
*result
= NULL
;
123 struct object_array stack
= OBJECT_ARRAY_INIT
;
124 struct commit
*commit
= NULL
;
125 struct commit_graft
*graft
;
126 struct commit_depth depths
;
128 init_commit_depth(&depths
);
129 while (commit
|| i
< heads
->nr
|| stack
.nr
) {
130 struct commit_list
*p
;
134 commit
= (struct commit
*)
135 deref_tag(the_repository
,
136 heads
->objects
[i
++].item
,
138 if (!commit
|| commit
->object
.type
!= OBJ_COMMIT
) {
142 depth_slot
= commit_depth_at(&depths
, commit
);
144 *depth_slot
= xmalloc(sizeof(int));
148 commit
= (struct commit
*)
149 object_array_pop(&stack
);
150 cur_depth
= **commit_depth_at(&depths
, commit
);
153 parse_commit_or_die(commit
);
155 if ((depth
!= INFINITE_DEPTH
&& cur_depth
>= depth
) ||
156 (is_repository_shallow(the_repository
) && !commit
->parents
&&
157 (graft
= lookup_commit_graft(the_repository
, &commit
->object
.oid
)) != NULL
&&
158 graft
->nr_parent
< 0)) {
159 commit_list_insert(commit
, &result
);
160 commit
->object
.flags
|= shallow_flag
;
164 commit
->object
.flags
|= not_shallow_flag
;
165 for (p
= commit
->parents
, commit
= NULL
; p
; p
= p
->next
) {
166 int **depth_slot
= commit_depth_at(&depths
, p
->item
);
168 *depth_slot
= xmalloc(sizeof(int));
169 **depth_slot
= cur_depth
;
171 if (cur_depth
>= **depth_slot
)
173 **depth_slot
= cur_depth
;
176 add_object_array(&p
->item
->object
,
180 cur_depth
= **commit_depth_at(&depths
, commit
);
184 deep_clear_commit_depth(&depths
, free_depth_in_slab
);
/* traverse_commit_list() callback: prepend commit to the list at *data. */
static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}
195 * Given rev-list arguments, run rev-list. All reachable commits
196 * except border ones are marked with not_shallow_flag. Border commits
197 * are marked with shallow_flag. The list of border/shallow commits
200 struct commit_list
*get_shallow_commits_by_rev_list(int ac
, const char **av
,
202 int not_shallow_flag
)
204 struct commit_list
*result
= NULL
, *p
;
205 struct commit_list
*not_shallow_list
= NULL
;
206 struct rev_info revs
;
207 int both_flags
= shallow_flag
| not_shallow_flag
;
210 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
211 * set at this point. But better be safe than sorry.
213 clear_object_flags(both_flags
);
215 is_repository_shallow(the_repository
); /* make sure shallows are read */
217 repo_init_revisions(the_repository
, &revs
, NULL
);
218 save_commit_buffer
= 0;
219 setup_revisions(ac
, av
, &revs
, NULL
);
221 if (prepare_revision_walk(&revs
))
222 die("revision walk setup failed");
223 traverse_commit_list(&revs
, show_commit
, NULL
, ¬_shallow_list
);
225 if (!not_shallow_list
)
226 die("no commits selected for shallow requests");
228 /* Mark all reachable commits as NOT_SHALLOW */
229 for (p
= not_shallow_list
; p
; p
= p
->next
)
230 p
->item
->object
.flags
|= not_shallow_flag
;
233 * mark border commits SHALLOW + NOT_SHALLOW.
234 * We cannot clear NOT_SHALLOW right now. Imagine border
235 * commit A is processed first, then commit B, whose parent is
236 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
237 * itself is considered border at step 2, which is incorrect.
239 for (p
= not_shallow_list
; p
; p
= p
->next
) {
240 struct commit
*c
= p
->item
;
241 struct commit_list
*parent
;
244 die("unable to parse commit %s",
245 oid_to_hex(&c
->object
.oid
));
247 for (parent
= c
->parents
; parent
; parent
= parent
->next
)
248 if (!(parent
->item
->object
.flags
& not_shallow_flag
)) {
249 c
->object
.flags
|= shallow_flag
;
250 commit_list_insert(c
, &result
);
254 free_commit_list(not_shallow_list
);
257 * Now we can clean up NOT_SHALLOW on border commits. Having
258 * both flags set can confuse the caller.
260 for (p
= result
; p
; p
= p
->next
) {
261 struct object
*o
= &p
->item
->object
;
262 if ((o
->flags
& both_flags
) == both_flags
)
263 o
->flags
&= ~not_shallow_flag
;
268 static void check_shallow_file_for_update(struct repository
*r
)
270 if (r
->parsed_objects
->is_shallow
== -1)
271 BUG("shallow must be initialized by now");
273 if (!stat_validity_check(r
->parsed_objects
->shallow_stat
,
274 git_path_shallow(r
)))
275 die("shallow file has changed since we read it");
/* State shared by write_shallow_commits_1() and its per-graft callback. */
struct write_shallow_data {
	struct strbuf *out;	/* destination buffer */
	int use_pack_protocol;	/* wrap lines in pkt-lines when set */
	int count;		/* number of shallow lines emitted */
	unsigned flags;		/* QUICK / SEEN_ONLY / VERBOSE */
};
289 static int write_one_shallow(const struct commit_graft
*graft
, void *cb_data
)
291 struct write_shallow_data
*data
= cb_data
;
292 const char *hex
= oid_to_hex(&graft
->oid
);
293 if (graft
->nr_parent
!= -1)
295 if (data
->flags
& QUICK
) {
296 if (!has_object_file(&graft
->oid
))
298 } else if (data
->flags
& SEEN_ONLY
) {
299 struct commit
*c
= lookup_commit(the_repository
, &graft
->oid
);
300 if (!c
|| !(c
->object
.flags
& SEEN
)) {
301 if (data
->flags
& VERBOSE
)
302 printf("Removing %s from .git/shallow\n",
303 oid_to_hex(&c
->object
.oid
));
308 if (data
->use_pack_protocol
)
309 packet_buf_write(data
->out
, "shallow %s", hex
);
311 strbuf_addstr(data
->out
, hex
);
312 strbuf_addch(data
->out
, '\n');
317 static int write_shallow_commits_1(struct strbuf
*out
, int use_pack_protocol
,
318 const struct oid_array
*extra
,
321 struct write_shallow_data data
;
324 data
.use_pack_protocol
= use_pack_protocol
;
327 for_each_commit_graft(write_one_shallow
, &data
);
330 for (i
= 0; i
< extra
->nr
; i
++) {
331 strbuf_addstr(out
, oid_to_hex(extra
->oid
+ i
));
332 strbuf_addch(out
, '\n');
/* Convenience wrapper: write all shallow commits with no filtering. */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}
344 const char *setup_temporary_shallow(const struct oid_array
*extra
)
346 struct tempfile
*temp
;
347 struct strbuf sb
= STRBUF_INIT
;
349 if (write_shallow_commits(&sb
, 0, extra
)) {
350 temp
= xmks_tempfile(git_path("shallow_XXXXXX"));
352 if (write_in_full(temp
->fd
, sb
.buf
, sb
.len
) < 0 ||
353 close_tempfile_gently(temp
) < 0)
354 die_errno("failed to write to %s",
355 get_tempfile_path(temp
));
357 return get_tempfile_path(temp
);
360 * is_repository_shallow() sees empty string as "no shallow
366 void setup_alternate_shallow(struct shallow_lock
*shallow_lock
,
367 const char **alternate_shallow_file
,
368 const struct oid_array
*extra
)
370 struct strbuf sb
= STRBUF_INIT
;
373 fd
= hold_lock_file_for_update(&shallow_lock
->lock
,
374 git_path_shallow(the_repository
),
376 check_shallow_file_for_update(the_repository
);
377 if (write_shallow_commits(&sb
, 0, extra
)) {
378 if (write_in_full(fd
, sb
.buf
, sb
.len
) < 0)
379 die_errno("failed to write to %s",
380 get_lock_file_path(&shallow_lock
->lock
));
381 *alternate_shallow_file
= get_lock_file_path(&shallow_lock
->lock
);
384 * is_repository_shallow() sees empty string as "no
387 *alternate_shallow_file
= "";
391 static int advertise_shallow_grafts_cb(const struct commit_graft
*graft
, void *cb
)
394 if (graft
->nr_parent
== -1)
395 packet_write_fmt(fd
, "shallow %s\n", oid_to_hex(&graft
->oid
));
399 void advertise_shallow_grafts(int fd
)
401 if (!is_repository_shallow(the_repository
))
403 for_each_commit_graft(advertise_shallow_grafts_cb
, &fd
);
407 * mark_reachable_objects() should have been run prior to this and all
408 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
409 * in which case lines are excised from the shallow file if they refer to
410 * commits that do not exist (any longer).
412 void prune_shallow(unsigned options
)
414 struct shallow_lock shallow_lock
= SHALLOW_LOCK_INIT
;
415 struct strbuf sb
= STRBUF_INIT
;
416 unsigned flags
= SEEN_ONLY
;
419 if (options
& PRUNE_QUICK
)
422 if (options
& PRUNE_SHOW_ONLY
) {
424 write_shallow_commits_1(&sb
, 0, NULL
, flags
);
428 fd
= hold_lock_file_for_update(&shallow_lock
.lock
,
429 git_path_shallow(the_repository
),
431 check_shallow_file_for_update(the_repository
);
432 if (write_shallow_commits_1(&sb
, 0, NULL
, flags
)) {
433 if (write_in_full(fd
, sb
.buf
, sb
.len
) < 0)
434 die_errno("failed to write to %s",
435 get_lock_file_path(&shallow_lock
.lock
));
436 commit_shallow_file(the_repository
, &shallow_lock
);
438 unlink(git_path_shallow(the_repository
));
439 rollback_shallow_file(the_repository
, &shallow_lock
);
/* Trace key for shallow-machinery debug output (GIT_TRACE_SHALLOW). */
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);
447 * Step 1, split sender shallow commits into "ours" and "theirs"
448 * Step 2, clean "ours" based on .git/shallow
450 void prepare_shallow_info(struct shallow_info
*info
, struct oid_array
*sa
)
453 trace_printf_key(&trace_shallow
, "shallow: prepare_shallow_info\n");
454 memset(info
, 0, sizeof(*info
));
458 ALLOC_ARRAY(info
->ours
, sa
->nr
);
459 ALLOC_ARRAY(info
->theirs
, sa
->nr
);
460 for (i
= 0; i
< sa
->nr
; i
++) {
461 if (has_object_file(sa
->oid
+ i
)) {
462 struct commit_graft
*graft
;
463 graft
= lookup_commit_graft(the_repository
,
465 if (graft
&& graft
->nr_parent
< 0)
467 info
->ours
[info
->nr_ours
++] = i
;
469 info
->theirs
[info
->nr_theirs
++] = i
;
473 void clear_shallow_info(struct shallow_info
*info
)
479 /* Step 4, remove non-existent ones in "theirs" after getting the pack */
481 void remove_nonexistent_theirs_shallow(struct shallow_info
*info
)
483 struct object_id
*oid
= info
->shallow
->oid
;
485 trace_printf_key(&trace_shallow
, "shallow: remove_nonexistent_theirs_shallow\n");
486 for (i
= dst
= 0; i
< info
->nr_theirs
; i
++) {
488 info
->theirs
[dst
] = info
->theirs
[i
];
489 if (has_object_file(oid
+ info
->theirs
[i
]))
492 info
->nr_theirs
= dst
;
495 define_commit_slab(ref_bitmap
, uint32_t *);
497 #define POOL_SIZE (512 * 1024)
500 struct ref_bitmap ref_bitmap
;
507 static uint32_t *paint_alloc(struct paint_info
*info
)
509 unsigned nr
= DIV_ROUND_UP(info
->nr_bits
, 32);
510 unsigned size
= nr
* sizeof(uint32_t);
512 if (!info
->pool_count
|| size
> info
->end
- info
->free
) {
513 if (size
> POOL_SIZE
)
514 BUG("pool size too small for %d in paint_alloc()",
517 REALLOC_ARRAY(info
->pools
, info
->pool_count
);
518 info
->free
= xmalloc(POOL_SIZE
);
519 info
->pools
[info
->pool_count
- 1] = info
->free
;
520 info
->end
= info
->free
+ POOL_SIZE
;
528 * Given a commit SHA-1, walk down to parents until either SEEN,
529 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
530 * all walked commits.
532 static void paint_down(struct paint_info
*info
, const struct object_id
*oid
,
536 struct commit_list
*head
= NULL
;
537 int bitmap_nr
= DIV_ROUND_UP(info
->nr_bits
, 32);
538 size_t bitmap_size
= st_mult(sizeof(uint32_t), bitmap_nr
);
539 struct commit
*c
= lookup_commit_reference_gently(the_repository
, oid
,
541 uint32_t *tmp
; /* to be freed before return */
547 tmp
= xmalloc(bitmap_size
);
548 bitmap
= paint_alloc(info
);
549 memset(bitmap
, 0, bitmap_size
);
550 bitmap
[id
/ 32] |= (1U << (id
% 32));
551 commit_list_insert(c
, &head
);
553 struct commit_list
*p
;
554 struct commit
*c
= pop_commit(&head
);
555 uint32_t **refs
= ref_bitmap_at(&info
->ref_bitmap
, c
);
557 /* XXX check "UNINTERESTING" from pack bitmaps if available */
558 if (c
->object
.flags
& (SEEN
| UNINTERESTING
))
561 c
->object
.flags
|= SEEN
;
566 memcpy(tmp
, *refs
, bitmap_size
);
567 for (i
= 0; i
< bitmap_nr
; i
++)
569 if (memcmp(tmp
, *refs
, bitmap_size
)) {
570 *refs
= paint_alloc(info
);
571 memcpy(*refs
, tmp
, bitmap_size
);
575 if (c
->object
.flags
& BOTTOM
)
579 die("unable to parse commit %s",
580 oid_to_hex(&c
->object
.oid
));
582 for (p
= c
->parents
; p
; p
= p
->next
) {
583 if (p
->item
->object
.flags
& SEEN
)
585 commit_list_insert(p
->item
, &head
);
589 nr
= get_max_object_index();
590 for (i
= 0; i
< nr
; i
++) {
591 struct object
*o
= get_indexed_object(i
);
592 if (o
&& o
->type
== OBJ_COMMIT
)
599 static int mark_uninteresting(const char *refname
, const struct object_id
*oid
,
600 int flags
, void *cb_data
)
602 struct commit
*commit
= lookup_commit_reference_gently(the_repository
,
606 commit
->object
.flags
|= UNINTERESTING
;
607 mark_parents_uninteresting(NULL
, commit
);
611 static void post_assign_shallow(struct shallow_info
*info
,
612 struct ref_bitmap
*ref_bitmap
,
615 * Step 6(+7), associate shallow commits with new refs
617 * info->ref must be initialized before calling this function.
619 * If used is not NULL, it's an array of info->shallow->nr
620 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
621 * m-th shallow commit from info->shallow.
623 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
624 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
625 * the ref needs some shallow commits from either info->ours or
628 void assign_shallow_commits_to_refs(struct shallow_info
*info
,
629 uint32_t **used
, int *ref_status
)
631 struct object_id
*oid
= info
->shallow
->oid
;
632 struct oid_array
*ref
= info
->ref
;
634 int *shallow
, nr_shallow
= 0;
635 struct paint_info pi
;
637 trace_printf_key(&trace_shallow
, "shallow: assign_shallow_commits_to_refs\n");
638 ALLOC_ARRAY(shallow
, info
->nr_ours
+ info
->nr_theirs
);
639 for (i
= 0; i
< info
->nr_ours
; i
++)
640 shallow
[nr_shallow
++] = info
->ours
[i
];
641 for (i
= 0; i
< info
->nr_theirs
; i
++)
642 shallow
[nr_shallow
++] = info
->theirs
[i
];
645 * Prepare the commit graph to track what refs can reach what
646 * (new) shallow commits.
648 nr
= get_max_object_index();
649 for (i
= 0; i
< nr
; i
++) {
650 struct object
*o
= get_indexed_object(i
);
651 if (!o
|| o
->type
!= OBJ_COMMIT
)
654 o
->flags
&= ~(UNINTERESTING
| BOTTOM
| SEEN
);
657 memset(&pi
, 0, sizeof(pi
));
658 init_ref_bitmap(&pi
.ref_bitmap
);
659 pi
.nr_bits
= ref
->nr
;
662 * "--not --all" to cut short the traversal if new refs
663 * connect to old refs. If not (e.g. force ref updates) it'll
664 * have to go down to the current shallow commits.
666 head_ref(mark_uninteresting
, NULL
);
667 for_each_ref(mark_uninteresting
, NULL
);
669 /* Mark potential bottoms so we won't go out of bound */
670 for (i
= 0; i
< nr_shallow
; i
++) {
671 struct commit
*c
= lookup_commit(the_repository
,
673 c
->object
.flags
|= BOTTOM
;
676 for (i
= 0; i
< ref
->nr
; i
++)
677 paint_down(&pi
, ref
->oid
+ i
, i
);
680 int bitmap_size
= DIV_ROUND_UP(pi
.nr_bits
, 32) * sizeof(uint32_t);
681 memset(used
, 0, sizeof(*used
) * info
->shallow
->nr
);
682 for (i
= 0; i
< nr_shallow
; i
++) {
683 const struct commit
*c
= lookup_commit(the_repository
,
685 uint32_t **map
= ref_bitmap_at(&pi
.ref_bitmap
, c
);
687 used
[shallow
[i
]] = xmemdupz(*map
, bitmap_size
);
690 * unreachable shallow commits are not removed from
691 * "ours" and "theirs". The user is supposed to run
692 * step 7 on every ref separately and not trust "ours"
693 * and "theirs" any more.
696 post_assign_shallow(info
, &pi
.ref_bitmap
, ref_status
);
698 clear_ref_bitmap(&pi
.ref_bitmap
);
699 for (i
= 0; i
< pi
.pool_count
; i
++)
/* Growable array of commit pointers collected from refs. */
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};
710 static int add_ref(const char *refname
, const struct object_id
*oid
,
711 int flags
, void *cb_data
)
713 struct commit_array
*ca
= cb_data
;
714 ALLOC_GROW(ca
->commits
, ca
->nr
+ 1, ca
->alloc
);
715 ca
->commits
[ca
->nr
] = lookup_commit_reference_gently(the_repository
,
717 if (ca
->commits
[ca
->nr
])
/* Bump ref_status[i] for every ref whose bit is set in `bitmap`. */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	unsigned int i;

	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}
733 * Step 7, reachability test on "ours" at commit level
735 static void post_assign_shallow(struct shallow_info
*info
,
736 struct ref_bitmap
*ref_bitmap
,
739 struct object_id
*oid
= info
->shallow
->oid
;
743 int bitmap_nr
= DIV_ROUND_UP(info
->ref
->nr
, 32);
744 struct commit_array ca
;
746 trace_printf_key(&trace_shallow
, "shallow: post_assign_shallow\n");
748 memset(ref_status
, 0, sizeof(*ref_status
) * info
->ref
->nr
);
750 /* Remove unreachable shallow commits from "theirs" */
751 for (i
= dst
= 0; i
< info
->nr_theirs
; i
++) {
753 info
->theirs
[dst
] = info
->theirs
[i
];
754 c
= lookup_commit(the_repository
, &oid
[info
->theirs
[i
]]);
755 bitmap
= ref_bitmap_at(ref_bitmap
, c
);
758 for (j
= 0; j
< bitmap_nr
; j
++)
760 update_refstatus(ref_status
, info
->ref
->nr
, *bitmap
);
765 info
->nr_theirs
= dst
;
767 memset(&ca
, 0, sizeof(ca
));
768 head_ref(add_ref
, &ca
);
769 for_each_ref(add_ref
, &ca
);
771 /* Remove unreachable shallow commits from "ours" */
772 for (i
= dst
= 0; i
< info
->nr_ours
; i
++) {
774 info
->ours
[dst
] = info
->ours
[i
];
775 c
= lookup_commit(the_repository
, &oid
[info
->ours
[i
]]);
776 bitmap
= ref_bitmap_at(ref_bitmap
, c
);
779 for (j
= 0; j
< bitmap_nr
; j
++)
781 /* Step 7, reachability test at commit level */
782 !in_merge_bases_many(c
, ca
.nr
, ca
.commits
)) {
783 update_refstatus(ref_status
, info
->ref
->nr
, *bitmap
);
793 /* (Delayed) step 7, reachability test at commit level */
794 int delayed_reachability_test(struct shallow_info
*si
, int c
)
796 if (si
->need_reachability_test
[c
]) {
797 struct commit
*commit
= lookup_commit(the_repository
,
798 &si
->shallow
->oid
[c
]);
801 struct commit_array ca
;
803 memset(&ca
, 0, sizeof(ca
));
804 head_ref(add_ref
, &ca
);
805 for_each_ref(add_ref
, &ca
);
806 si
->commits
= ca
.commits
;
807 si
->nr_commits
= ca
.nr
;
810 si
->reachable
[c
] = in_merge_bases_many(commit
,
813 si
->need_reachability_test
[c
] = 0;
815 return si
->reachable
[c
];