9 #include "sha1-array.h"
12 #include "commit-slab.h"
14 #include "list-objects.h"
16 static int is_shallow
= -1;
17 static struct stat_validity shallow_stat
;
18 static char *alternate_shallow_file
;
20 void set_alternate_shallow_file(const char *path
, int override
)
23 die("BUG: is_repository_shallow must not be called before set_alternate_shallow_file");
24 if (alternate_shallow_file
&& !override
)
26 free(alternate_shallow_file
);
27 alternate_shallow_file
= xstrdup_or_null(path
);
30 int register_shallow(const unsigned char *sha1
)
32 struct commit_graft
*graft
=
33 xmalloc(sizeof(struct commit_graft
));
34 struct commit
*commit
= lookup_commit(sha1
);
36 hashcpy(graft
->oid
.hash
, sha1
);
37 graft
->nr_parent
= -1;
38 if (commit
&& commit
->object
.parsed
)
39 commit
->parents
= NULL
;
40 return register_commit_graft(graft
, 0);
43 int is_repository_shallow(void)
47 const char *path
= alternate_shallow_file
;
53 path
= git_path_shallow();
55 * fetch-pack sets '--shallow-file ""' as an indicator that no
56 * shallow file should be used. We could just open it and it
57 * will likely fail. But let's do an explicit check instead.
59 if (!*path
|| (fp
= fopen(path
, "r")) == NULL
) {
60 stat_validity_clear(&shallow_stat
);
64 stat_validity_update(&shallow_stat
, fileno(fp
));
67 while (fgets(buf
, sizeof(buf
), fp
)) {
68 unsigned char sha1
[20];
69 if (get_sha1_hex(buf
, sha1
))
70 die("bad shallow line: %s", buf
);
71 register_shallow(sha1
);
77 struct commit_list
*get_shallow_commits(struct object_array
*heads
, int depth
,
78 int shallow_flag
, int not_shallow_flag
)
80 int i
= 0, cur_depth
= 0;
81 struct commit_list
*result
= NULL
;
82 struct object_array stack
= OBJECT_ARRAY_INIT
;
83 struct commit
*commit
= NULL
;
84 struct commit_graft
*graft
;
86 while (commit
|| i
< heads
->nr
|| stack
.nr
) {
87 struct commit_list
*p
;
90 commit
= (struct commit
*)
91 deref_tag(heads
->objects
[i
++].item
, NULL
, 0);
92 if (!commit
|| commit
->object
.type
!= OBJ_COMMIT
) {
97 commit
->util
= xmalloc(sizeof(int));
98 *(int *)commit
->util
= 0;
101 commit
= (struct commit
*)
102 stack
.objects
[--stack
.nr
].item
;
103 cur_depth
= *(int *)commit
->util
;
106 parse_commit_or_die(commit
);
108 if ((depth
!= INFINITE_DEPTH
&& cur_depth
>= depth
) ||
109 (is_repository_shallow() && !commit
->parents
&&
110 (graft
= lookup_commit_graft(commit
->object
.oid
.hash
)) != NULL
&&
111 graft
->nr_parent
< 0)) {
112 commit_list_insert(commit
, &result
);
113 commit
->object
.flags
|= shallow_flag
;
117 commit
->object
.flags
|= not_shallow_flag
;
118 for (p
= commit
->parents
, commit
= NULL
; p
; p
= p
->next
) {
119 if (!p
->item
->util
) {
120 int *pointer
= xmalloc(sizeof(int));
121 p
->item
->util
= pointer
;
122 *pointer
= cur_depth
;
124 int *pointer
= p
->item
->util
;
125 if (cur_depth
>= *pointer
)
127 *pointer
= cur_depth
;
130 add_object_array(&p
->item
->object
,
134 cur_depth
= *(int *)commit
->util
;
/* traverse_commit_list() callback: prepend each commit to the list at *data. */
static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}
148 * Given rev-list arguments, run rev-list. All reachable commits
149 * except border ones are marked with not_shallow_flag. Border commits
150 * are marked with shallow_flag. The list of border/shallow commits
153 struct commit_list
*get_shallow_commits_by_rev_list(int ac
, const char **av
,
155 int not_shallow_flag
)
157 struct commit_list
*result
= NULL
, *p
;
158 struct commit_list
*not_shallow_list
= NULL
;
159 struct rev_info revs
;
160 int both_flags
= shallow_flag
| not_shallow_flag
;
163 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
164 * set at this point. But better be safe than sorry.
166 clear_object_flags(both_flags
);
168 is_repository_shallow(); /* make sure shallows are read */
170 init_revisions(&revs
, NULL
);
171 save_commit_buffer
= 0;
172 setup_revisions(ac
, av
, &revs
, NULL
);
174 if (prepare_revision_walk(&revs
))
175 die("revision walk setup failed");
176 traverse_commit_list(&revs
, show_commit
, NULL
, ¬_shallow_list
);
178 /* Mark all reachable commits as NOT_SHALLOW */
179 for (p
= not_shallow_list
; p
; p
= p
->next
)
180 p
->item
->object
.flags
|= not_shallow_flag
;
183 * mark border commits SHALLOW + NOT_SHALLOW.
184 * We cannot clear NOT_SHALLOW right now. Imagine border
185 * commit A is processed first, then commit B, whose parent is
186 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
187 * itself is considered border at step 2, which is incorrect.
189 for (p
= not_shallow_list
; p
; p
= p
->next
) {
190 struct commit
*c
= p
->item
;
191 struct commit_list
*parent
;
194 die("unable to parse commit %s",
195 oid_to_hex(&c
->object
.oid
));
197 for (parent
= c
->parents
; parent
; parent
= parent
->next
)
198 if (!(parent
->item
->object
.flags
& not_shallow_flag
)) {
199 c
->object
.flags
|= shallow_flag
;
200 commit_list_insert(c
, &result
);
204 free_commit_list(not_shallow_list
);
207 * Now we can clean up NOT_SHALLOW on border commits. Having
208 * both flags set can confuse the caller.
210 for (p
= result
; p
; p
= p
->next
) {
211 struct object
*o
= &p
->item
->object
;
212 if ((o
->flags
& both_flags
) == both_flags
)
213 o
->flags
&= ~not_shallow_flag
;
218 static void check_shallow_file_for_update(void)
220 if (is_shallow
== -1)
221 die("BUG: shallow must be initialized by now");
223 if (!stat_validity_check(&shallow_stat
, git_path_shallow()))
224 die("shallow file has changed since we read it");
/* Callback state for write_one_shallow() / write_shallow_commits_1(). */
struct write_shallow_data {
	struct strbuf *out;      /* destination buffer (see write_one_shallow) */
	int use_pack_protocol;   /* nonzero: emit pkt-lines instead of raw lines */
	int count;               /* NOTE(review): reconstructed; presumed tally of
				    emitted entries, returned by
				    write_shallow_commits_1() — confirm */
	unsigned flags;          /* SEEN_ONLY / VERBOSE (see write_one_shallow) */
};
237 static int write_one_shallow(const struct commit_graft
*graft
, void *cb_data
)
239 struct write_shallow_data
*data
= cb_data
;
240 const char *hex
= oid_to_hex(&graft
->oid
);
241 if (graft
->nr_parent
!= -1)
243 if (data
->flags
& SEEN_ONLY
) {
244 struct commit
*c
= lookup_commit(graft
->oid
.hash
);
245 if (!c
|| !(c
->object
.flags
& SEEN
)) {
246 if (data
->flags
& VERBOSE
)
247 printf("Removing %s from .git/shallow\n",
248 oid_to_hex(&c
->object
.oid
));
253 if (data
->use_pack_protocol
)
254 packet_buf_write(data
->out
, "shallow %s", hex
);
256 strbuf_addstr(data
->out
, hex
);
257 strbuf_addch(data
->out
, '\n');
262 static int write_shallow_commits_1(struct strbuf
*out
, int use_pack_protocol
,
263 const struct sha1_array
*extra
,
266 struct write_shallow_data data
;
269 data
.use_pack_protocol
= use_pack_protocol
;
272 for_each_commit_graft(write_one_shallow
, &data
);
275 for (i
= 0; i
< extra
->nr
; i
++) {
276 strbuf_addstr(out
, sha1_to_hex(extra
->sha1
[i
]));
277 strbuf_addch(out
, '\n');
/* Public wrapper: write all shallow entries (no SEEN_ONLY/VERBOSE filtering). */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct sha1_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}
289 static struct tempfile temporary_shallow
;
291 const char *setup_temporary_shallow(const struct sha1_array
*extra
)
293 struct strbuf sb
= STRBUF_INIT
;
296 if (write_shallow_commits(&sb
, 0, extra
)) {
297 fd
= xmks_tempfile(&temporary_shallow
, git_path("shallow_XXXXXX"));
299 if (write_in_full(fd
, sb
.buf
, sb
.len
) != sb
.len
)
300 die_errno("failed to write to %s",
301 get_tempfile_path(&temporary_shallow
));
302 close_tempfile(&temporary_shallow
);
304 return get_tempfile_path(&temporary_shallow
);
307 * is_repository_shallow() sees empty string as "no shallow
310 return get_tempfile_path(&temporary_shallow
);
313 void setup_alternate_shallow(struct lock_file
*shallow_lock
,
314 const char **alternate_shallow_file
,
315 const struct sha1_array
*extra
)
317 struct strbuf sb
= STRBUF_INIT
;
320 fd
= hold_lock_file_for_update(shallow_lock
, git_path_shallow(),
322 check_shallow_file_for_update();
323 if (write_shallow_commits(&sb
, 0, extra
)) {
324 if (write_in_full(fd
, sb
.buf
, sb
.len
) != sb
.len
)
325 die_errno("failed to write to %s",
326 get_lock_file_path(shallow_lock
));
327 *alternate_shallow_file
= get_lock_file_path(shallow_lock
);
330 * is_repository_shallow() sees empty string as "no
333 *alternate_shallow_file
= "";
337 static int advertise_shallow_grafts_cb(const struct commit_graft
*graft
, void *cb
)
340 if (graft
->nr_parent
== -1)
341 packet_write_fmt(fd
, "shallow %s\n", oid_to_hex(&graft
->oid
));
345 void advertise_shallow_grafts(int fd
)
347 if (!is_repository_shallow())
349 for_each_commit_graft(advertise_shallow_grafts_cb
, &fd
);
353 * mark_reachable_objects() should have been run prior to this and all
354 * reachable commits marked as "SEEN".
356 void prune_shallow(int show_only
)
358 static struct lock_file shallow_lock
;
359 struct strbuf sb
= STRBUF_INIT
;
363 write_shallow_commits_1(&sb
, 0, NULL
, SEEN_ONLY
| VERBOSE
);
367 fd
= hold_lock_file_for_update(&shallow_lock
, git_path_shallow(),
369 check_shallow_file_for_update();
370 if (write_shallow_commits_1(&sb
, 0, NULL
, SEEN_ONLY
)) {
371 if (write_in_full(fd
, sb
.buf
, sb
.len
) != sb
.len
)
372 die_errno("failed to write to %s",
373 get_lock_file_path(&shallow_lock
));
374 commit_lock_file(&shallow_lock
);
376 unlink(git_path_shallow());
377 rollback_lock_file(&shallow_lock
);
382 struct trace_key trace_shallow
= TRACE_KEY_INIT(SHALLOW
);
385 * Step 1, split sender shallow commits into "ours" and "theirs"
386 * Step 2, clean "ours" based on .git/shallow
388 void prepare_shallow_info(struct shallow_info
*info
, struct sha1_array
*sa
)
391 trace_printf_key(&trace_shallow
, "shallow: prepare_shallow_info\n");
392 memset(info
, 0, sizeof(*info
));
396 ALLOC_ARRAY(info
->ours
, sa
->nr
);
397 ALLOC_ARRAY(info
->theirs
, sa
->nr
);
398 for (i
= 0; i
< sa
->nr
; i
++) {
399 if (has_sha1_file(sa
->sha1
[i
])) {
400 struct commit_graft
*graft
;
401 graft
= lookup_commit_graft(sa
->sha1
[i
]);
402 if (graft
&& graft
->nr_parent
< 0)
404 info
->ours
[info
->nr_ours
++] = i
;
406 info
->theirs
[info
->nr_theirs
++] = i
;
410 void clear_shallow_info(struct shallow_info
*info
)
416 /* Step 4, remove non-existent ones in "theirs" after getting the pack */
418 void remove_nonexistent_theirs_shallow(struct shallow_info
*info
)
420 unsigned char (*sha1
)[20] = info
->shallow
->sha1
;
422 trace_printf_key(&trace_shallow
, "shallow: remove_nonexistent_theirs_shallow\n");
423 for (i
= dst
= 0; i
< info
->nr_theirs
; i
++) {
425 info
->theirs
[dst
] = info
->theirs
[i
];
426 if (has_sha1_file(sha1
[info
->theirs
[i
]]))
429 info
->nr_theirs
= dst
;
432 define_commit_slab(ref_bitmap
, uint32_t *);
434 #define POOL_SIZE (512 * 1024)
437 struct ref_bitmap ref_bitmap
;
444 static uint32_t *paint_alloc(struct paint_info
*info
)
446 unsigned nr
= (info
->nr_bits
+ 31) / 32;
447 unsigned size
= nr
* sizeof(uint32_t);
449 if (!info
->pool_count
|| size
> info
->end
- info
->free
) {
450 if (size
> POOL_SIZE
)
451 die("BUG: pool size too small for %d in paint_alloc()",
454 REALLOC_ARRAY(info
->pools
, info
->pool_count
);
455 info
->free
= xmalloc(POOL_SIZE
);
456 info
->pools
[info
->pool_count
- 1] = info
->free
;
457 info
->end
= info
->free
+ POOL_SIZE
;
465 * Given a commit SHA-1, walk down to parents until either SEEN,
466 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
467 * all walked commits.
469 static void paint_down(struct paint_info
*info
, const unsigned char *sha1
,
473 struct commit_list
*head
= NULL
;
474 int bitmap_nr
= (info
->nr_bits
+ 31) / 32;
475 size_t bitmap_size
= st_mult(sizeof(uint32_t), bitmap_nr
);
476 uint32_t *tmp
= xmalloc(bitmap_size
); /* to be freed before return */
477 uint32_t *bitmap
= paint_alloc(info
);
478 struct commit
*c
= lookup_commit_reference_gently(sha1
, 1);
481 memset(bitmap
, 0, bitmap_size
);
482 bitmap
[id
/ 32] |= (1U << (id
% 32));
483 commit_list_insert(c
, &head
);
485 struct commit_list
*p
;
486 struct commit
*c
= pop_commit(&head
);
487 uint32_t **refs
= ref_bitmap_at(&info
->ref_bitmap
, c
);
489 /* XXX check "UNINTERESTING" from pack bitmaps if available */
490 if (c
->object
.flags
& (SEEN
| UNINTERESTING
))
493 c
->object
.flags
|= SEEN
;
498 memcpy(tmp
, *refs
, bitmap_size
);
499 for (i
= 0; i
< bitmap_nr
; i
++)
501 if (memcmp(tmp
, *refs
, bitmap_size
)) {
502 *refs
= paint_alloc(info
);
503 memcpy(*refs
, tmp
, bitmap_size
);
507 if (c
->object
.flags
& BOTTOM
)
511 die("unable to parse commit %s",
512 oid_to_hex(&c
->object
.oid
));
514 for (p
= c
->parents
; p
; p
= p
->next
) {
515 if (p
->item
->object
.flags
& SEEN
)
517 commit_list_insert(p
->item
, &head
);
521 nr
= get_max_object_index();
522 for (i
= 0; i
< nr
; i
++) {
523 struct object
*o
= get_indexed_object(i
);
524 if (o
&& o
->type
== OBJ_COMMIT
)
531 static int mark_uninteresting(const char *refname
, const struct object_id
*oid
,
532 int flags
, void *cb_data
)
534 struct commit
*commit
= lookup_commit_reference_gently(oid
->hash
, 1);
537 commit
->object
.flags
|= UNINTERESTING
;
538 mark_parents_uninteresting(commit
);
/* Forward declaration; defined below (step 7). */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
546 * Step 6(+7), associate shallow commits with new refs
548 * info->ref must be initialized before calling this function.
550 * If used is not NULL, it's an array of info->shallow->nr
551 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
552 * m-th shallow commit from info->shallow.
554 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
555 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
556 * the ref needs some shallow commits from either info->ours or
559 void assign_shallow_commits_to_refs(struct shallow_info
*info
,
560 uint32_t **used
, int *ref_status
)
562 unsigned char (*sha1
)[20] = info
->shallow
->sha1
;
563 struct sha1_array
*ref
= info
->ref
;
565 int *shallow
, nr_shallow
= 0;
566 struct paint_info pi
;
568 trace_printf_key(&trace_shallow
, "shallow: assign_shallow_commits_to_refs\n");
569 ALLOC_ARRAY(shallow
, info
->nr_ours
+ info
->nr_theirs
);
570 for (i
= 0; i
< info
->nr_ours
; i
++)
571 shallow
[nr_shallow
++] = info
->ours
[i
];
572 for (i
= 0; i
< info
->nr_theirs
; i
++)
573 shallow
[nr_shallow
++] = info
->theirs
[i
];
576 * Prepare the commit graph to track what refs can reach what
577 * (new) shallow commits.
579 nr
= get_max_object_index();
580 for (i
= 0; i
< nr
; i
++) {
581 struct object
*o
= get_indexed_object(i
);
582 if (!o
|| o
->type
!= OBJ_COMMIT
)
585 o
->flags
&= ~(UNINTERESTING
| BOTTOM
| SEEN
);
588 memset(&pi
, 0, sizeof(pi
));
589 init_ref_bitmap(&pi
.ref_bitmap
);
590 pi
.nr_bits
= ref
->nr
;
593 * "--not --all" to cut short the traversal if new refs
594 * connect to old refs. If not (e.g. force ref updates) it'll
595 * have to go down to the current shallow commits.
597 head_ref(mark_uninteresting
, NULL
);
598 for_each_ref(mark_uninteresting
, NULL
);
600 /* Mark potential bottoms so we won't go out of bound */
601 for (i
= 0; i
< nr_shallow
; i
++) {
602 struct commit
*c
= lookup_commit(sha1
[shallow
[i
]]);
603 c
->object
.flags
|= BOTTOM
;
606 for (i
= 0; i
< ref
->nr
; i
++)
607 paint_down(&pi
, ref
->sha1
[i
], i
);
610 int bitmap_size
= ((pi
.nr_bits
+ 31) / 32) * sizeof(uint32_t);
611 memset(used
, 0, sizeof(*used
) * info
->shallow
->nr
);
612 for (i
= 0; i
< nr_shallow
; i
++) {
613 const struct commit
*c
= lookup_commit(sha1
[shallow
[i
]]);
614 uint32_t **map
= ref_bitmap_at(&pi
.ref_bitmap
, c
);
616 used
[shallow
[i
]] = xmemdupz(*map
, bitmap_size
);
619 * unreachable shallow commits are not removed from
620 * "ours" and "theirs". The user is supposed to run
621 * step 7 on every ref separately and not trust "ours"
622 * and "theirs" any more.
625 post_assign_shallow(info
, &pi
.ref_bitmap
, ref_status
);
627 clear_ref_bitmap(&pi
.ref_bitmap
);
628 for (i
= 0; i
< pi
.pool_count
; i
++)
/* Growable array of commits collected by add_ref(). */
struct commit_array {
	struct commit **commits;
	int nr, alloc; /* nr/alloc inferred from ALLOC_GROW use in add_ref() */
};
639 static int add_ref(const char *refname
, const struct object_id
*oid
,
640 int flags
, void *cb_data
)
642 struct commit_array
*ca
= cb_data
;
643 ALLOC_GROW(ca
->commits
, ca
->nr
+ 1, ca
->alloc
);
644 ca
->commits
[ca
->nr
] = lookup_commit_reference_gently(oid
->hash
, 1);
645 if (ca
->commits
[ca
->nr
])
/*
 * Bump ref_status[i] for every bit i set in 'bitmap' (nr bits total).
 * A NULL ref_status is a no-op, so callers need not guard.
 */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	int i;
	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}
661 * Step 7, reachability test on "ours" at commit level
663 static void post_assign_shallow(struct shallow_info
*info
,
664 struct ref_bitmap
*ref_bitmap
,
667 unsigned char (*sha1
)[20] = info
->shallow
->sha1
;
671 int bitmap_nr
= (info
->ref
->nr
+ 31) / 32;
672 struct commit_array ca
;
674 trace_printf_key(&trace_shallow
, "shallow: post_assign_shallow\n");
676 memset(ref_status
, 0, sizeof(*ref_status
) * info
->ref
->nr
);
678 /* Remove unreachable shallow commits from "theirs" */
679 for (i
= dst
= 0; i
< info
->nr_theirs
; i
++) {
681 info
->theirs
[dst
] = info
->theirs
[i
];
682 c
= lookup_commit(sha1
[info
->theirs
[i
]]);
683 bitmap
= ref_bitmap_at(ref_bitmap
, c
);
686 for (j
= 0; j
< bitmap_nr
; j
++)
688 update_refstatus(ref_status
, info
->ref
->nr
, *bitmap
);
693 info
->nr_theirs
= dst
;
695 memset(&ca
, 0, sizeof(ca
));
696 head_ref(add_ref
, &ca
);
697 for_each_ref(add_ref
, &ca
);
699 /* Remove unreachable shallow commits from "ours" */
700 for (i
= dst
= 0; i
< info
->nr_ours
; i
++) {
702 info
->ours
[dst
] = info
->ours
[i
];
703 c
= lookup_commit(sha1
[info
->ours
[i
]]);
704 bitmap
= ref_bitmap_at(ref_bitmap
, c
);
707 for (j
= 0; j
< bitmap_nr
; j
++)
709 /* Step 7, reachability test at commit level */
710 !in_merge_bases_many(c
, ca
.nr
, ca
.commits
)) {
711 update_refstatus(ref_status
, info
->ref
->nr
, *bitmap
);
721 /* (Delayed) step 7, reachability test at commit level */
722 int delayed_reachability_test(struct shallow_info
*si
, int c
)
724 if (si
->need_reachability_test
[c
]) {
725 struct commit
*commit
= lookup_commit(si
->shallow
->sha1
[c
]);
728 struct commit_array ca
;
730 memset(&ca
, 0, sizeof(ca
));
731 head_ref(add_ref
, &ca
);
732 for_each_ref(add_ref
, &ca
);
733 si
->commits
= ca
.commits
;
734 si
->nr_commits
= ca
.nr
;
737 si
->reachable
[c
] = in_merge_bases_many(commit
,
740 si
->need_reachability_test
[c
] = 0;
742 return si
->reachable
[c
];