#include "fetch-pack.h"
#include "run-command.h"
#include "transport.h"
#include "prio-queue.h"
#include "sha1-array.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)
/*
 * After sending this many "have"s if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02

static unsigned int allow_unadvertised_object_request;
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};
static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}
static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	int i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}
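
/*
 * Add a commit to the rev_list priority queue used for "have"
 * negotiation, marking it with the given flag.  Commits not yet
 * known to be common are counted in non_common_revs.
 */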
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}
static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}
/*
  This function marks a rev and its ancestors as common.
  In some cases, it is desirable to mark only the ancestors (for example
  when only the server does not yet know that they are common).
*/
static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
/*
  Get the next rev to send, ignoring the common.
*/
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got EOF"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}
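
/*
 * Flush the accumulated request to the server.  Over stateless RPC the
 * buffer is handed to send_sideband() so it is split into packet-sized
 * chunks; otherwise it is written to the descriptor directly.
 */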
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}
static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
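
/*
 * Compute how many "have"s may be sent before the next flush: the
 * window doubles until it reaches PIPESAFE_FLUSH (or LARGE_FLUSH when
 * using stateless RPC) and then grows more slowly, keeping the pipe
 * busy without racing too far ahead of the server's ACKs.
 */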
static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}
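
/*
 * Negotiate with the server over fd[]: advertise our capabilities and
 * the refs we "want", then stream "have" lines from our history and
 * process the server's ACKs until a common cut point is found or we
 * give up.  The last acknowledged object id is stored in result_oid.
 */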
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	for_each_ref(clear_marks, NULL);

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
static struct commit_list *complete;
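
/*
 * Mark an object we already have as COMPLETE, peeling tags down to the
 * underlying commit and collecting completed commits on the "complete"
 * list for later cutoff handling.
 */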
static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}
static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
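
/*
 * Reduce the advertised ref list in *refs to what we were asked to
 * fetch: refs matching sought[] (or everything when args->fetch_all is
 * set), recording for each sought ref whether it was matched, allowed
 * as an unadvertised object, or rejected.
 */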
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}
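
/*
 * Check which advertised refs we already have: mark our own ref tips
 * (and cached alternates) COMPLETE, push complete remote refs as common
 * starting points for the negotiation, and filter the ref list.
 * Returns nonzero when there is nothing left to fetch.
 */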
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	timestamp_t cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file(&ref->old_oid))
			continue;

		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}
	return retval;
}
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
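
/*
 * Receive the pack stream from upload-pack, demultiplexing the
 * sideband in a helper process when it is in use, and feed the data to
 * index-pack or unpack-objects depending on args->keep_pack and the
 * configured unpack limit.
 */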
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (args->lock_pack || unpack_limit) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		argv_array_push(&cmd.args, "--strict");

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
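
/*
 * Drive a single fetch over an established connection: inspect the
 * server's capability advertisement, negotiate common commits with
 * find_common(), and download the resulting pack with get_pack().
 */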
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}
static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}
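
/*
 * Collapse duplicate names in the sought[] array, keeping only the
 * first occurrence of each ref, and return the reduced count.
 */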
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
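
/*
 * Bring .git/shallow in line with what the fetch produced: commit or
 * roll back the shallow lock file, accept new shallow roots when
 * cloning or when --update-shallow was given, and otherwise reject
 * refs whose history would require a shallow update.
 */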
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
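
/*
 * Public entry point: negotiate and fetch a pack over an already
 * established connection, then reconcile shallow information and
 * return the (copied) list of refs that were fetched.
 */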
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}
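
/*
 * Report refs that were asked for but could not be fetched; returns
 * nonzero if any ref had a problem.
 */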
int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),