11 #include "fetch-pack.h"
13 #include "run-command.h"
15 #include "transport.h"
17 #include "prio-queue.h"
18 #include "sha1-array.h"
22 static int transfer_unpack_limit
= -1;
23 static int fetch_unpack_limit
= -1;
24 static int unpack_limit
= 100;
25 static int prefer_ofs_delta
= 1;
27 static int deepen_since_ok
;
28 static int deepen_not_ok
;
29 static int fetch_fsck_objects
= -1;
30 static int transfer_fsck_objects
= -1;
31 static int agent_supported
;
32 static int server_supports_filtering
;
33 static struct lock_file shallow_lock
;
34 static const char *alternate_shallow_file
;
36 /* Remember to update object flag allocation in object.h */
37 #define COMPLETE (1U << 0)
38 #define COMMON (1U << 1)
39 #define COMMON_REF (1U << 2)
40 #define SEEN (1U << 3)
41 #define POPPED (1U << 4)
42 #define ALTERNATE (1U << 5)
47 * After sending this many "have"s if we do not get any new ACK , we
48 * give up traversing our history.
50 #define MAX_IN_VAIN 256
52 static struct prio_queue rev_list
= { compare_commits_by_commit_date
};
53 static int non_common_revs
, multi_ack
, use_sideband
;
54 /* Allow specifying sha1 if it is a ref tip. */
55 #define ALLOW_TIP_SHA1 01
56 /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
57 #define ALLOW_REACHABLE_SHA1 02
58 static unsigned int allow_unadvertised_object_request
;
60 __attribute__((format (printf
, 2, 3)))
61 static inline void print_verbose(const struct fetch_pack_args
*args
,
69 va_start(params
, fmt
);
70 vfprintf(stderr
, fmt
, params
);
/* Growable array of objects found in alternate object stores. */
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;	/* NOTE(review): field names reconstructed; ALLOC_GROW below uses ->nr/->alloc */
};
80 static void cache_one_alternate(const char *refname
,
81 const struct object_id
*oid
,
84 struct alternate_object_cache
*cache
= vcache
;
85 struct object
*obj
= parse_object(oid
);
87 if (!obj
|| (obj
->flags
& ALTERNATE
))
90 obj
->flags
|= ALTERNATE
;
91 ALLOC_GROW(cache
->items
, cache
->nr
+ 1, cache
->alloc
);
92 cache
->items
[cache
->nr
++] = obj
;
95 static void for_each_cached_alternate(void (*cb
)(struct object
*))
97 static int initialized
;
98 static struct alternate_object_cache cache
;
102 for_each_alternate_ref(cache_one_alternate
, &cache
);
106 for (i
= 0; i
< cache
.nr
; i
++)
110 static void rev_list_push(struct commit
*commit
, int mark
)
112 if (!(commit
->object
.flags
& mark
)) {
113 commit
->object
.flags
|= mark
;
115 if (parse_commit(commit
))
118 prio_queue_put(&rev_list
, commit
);
120 if (!(commit
->object
.flags
& COMMON
))
125 static int rev_list_insert_ref(const char *refname
, const struct object_id
*oid
)
127 struct object
*o
= deref_tag(parse_object(oid
), refname
, 0);
129 if (o
&& o
->type
== OBJ_COMMIT
)
130 rev_list_push((struct commit
*)o
, SEEN
);
/* each_ref_fn adapter for rev_list_insert_ref(); flag/cb_data unused. */
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
141 static int clear_marks(const char *refname
, const struct object_id
*oid
,
142 int flag
, void *cb_data
)
144 struct object
*o
= deref_tag(parse_object(oid
), refname
, 0);
146 if (o
&& o
->type
== OBJ_COMMIT
)
147 clear_commit_marks((struct commit
*)o
,
148 COMMON
| COMMON_REF
| SEEN
| POPPED
);
153 This function marks a rev and its ancestors as common.
154 In some cases, it is desirable to mark only the ancestors (for example
155 when only the server does not yet know that they are common).
158 static void mark_common(struct commit
*commit
,
159 int ancestors_only
, int dont_parse
)
161 if (commit
!= NULL
&& !(commit
->object
.flags
& COMMON
)) {
162 struct object
*o
= (struct object
*)commit
;
167 if (!(o
->flags
& SEEN
))
168 rev_list_push(commit
, SEEN
);
170 struct commit_list
*parents
;
172 if (!ancestors_only
&& !(o
->flags
& POPPED
))
174 if (!o
->parsed
&& !dont_parse
)
175 if (parse_commit(commit
))
178 for (parents
= commit
->parents
;
180 parents
= parents
->next
)
181 mark_common(parents
->item
, 0, dont_parse
);
187 Get the next rev to send, ignoring the common.
190 static const struct object_id
*get_rev(void)
192 struct commit
*commit
= NULL
;
194 while (commit
== NULL
) {
196 struct commit_list
*parents
;
198 if (rev_list
.nr
== 0 || non_common_revs
== 0)
201 commit
= prio_queue_get(&rev_list
);
202 parse_commit(commit
);
203 parents
= commit
->parents
;
205 commit
->object
.flags
|= POPPED
;
206 if (!(commit
->object
.flags
& COMMON
))
209 if (commit
->object
.flags
& COMMON
) {
210 /* do not send "have", and ignore ancestors */
212 mark
= COMMON
| SEEN
;
213 } else if (commit
->object
.flags
& COMMON_REF
)
214 /* send "have", and ignore ancestors */
215 mark
= COMMON
| SEEN
;
217 /* send "have", also for its ancestors */
221 if (!(parents
->item
->object
.flags
& SEEN
))
222 rev_list_push(parents
->item
, mark
);
224 mark_common(parents
->item
, 1, 0);
225 parents
= parents
->next
;
229 return &commit
->object
.oid
;
240 static void consume_shallow_list(struct fetch_pack_args
*args
, int fd
)
242 if (args
->stateless_rpc
&& args
->deepen
) {
243 /* If we sent a depth we will get back "duplicate"
244 * shallow and unshallow commands every time there
245 * is a block of have lines exchanged.
248 while ((line
= packet_read_line(fd
, NULL
))) {
249 if (starts_with(line
, "shallow "))
251 if (starts_with(line
, "unshallow "))
253 die(_("git fetch-pack: expected shallow list"));
258 static enum ack_type
get_ack(int fd
, struct object_id
*result_oid
)
261 char *line
= packet_read_line(fd
, &len
);
265 die(_("git fetch-pack: expected ACK/NAK, got EOF"));
266 if (!strcmp(line
, "NAK"))
268 if (skip_prefix(line
, "ACK ", &arg
)) {
269 if (!get_oid_hex(arg
, result_oid
)) {
274 if (strstr(arg
, "continue"))
276 if (strstr(arg
, "common"))
278 if (strstr(arg
, "ready"))
283 if (skip_prefix(line
, "ERR ", &arg
))
284 die(_("remote error: %s"), arg
);
285 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line
);
288 static void send_request(struct fetch_pack_args
*args
,
289 int fd
, struct strbuf
*buf
)
291 if (args
->stateless_rpc
) {
292 send_sideband(fd
, -1, buf
->buf
, buf
->len
, LARGE_PACKET_MAX
);
295 write_or_die(fd
, buf
->buf
, buf
->len
);
298 static void insert_one_alternate_object(struct object
*obj
)
300 rev_list_insert_ref(NULL
, &obj
->oid
);
303 #define INITIAL_FLUSH 16
304 #define PIPESAFE_FLUSH 32
305 #define LARGE_FLUSH 16384
307 static int next_flush(struct fetch_pack_args
*args
, int count
)
309 if (args
->stateless_rpc
) {
310 if (count
< LARGE_FLUSH
)
313 count
= count
* 11 / 10;
315 if (count
< PIPESAFE_FLUSH
)
318 count
+= PIPESAFE_FLUSH
;
323 static int find_common(struct fetch_pack_args
*args
,
324 int fd
[2], struct object_id
*result_oid
,
328 int count
= 0, flushes
= 0, flush_at
= INITIAL_FLUSH
, retval
;
329 const struct object_id
*oid
;
330 unsigned in_vain
= 0;
331 int got_continue
= 0;
333 struct strbuf req_buf
= STRBUF_INIT
;
334 size_t state_len
= 0;
336 if (args
->stateless_rpc
&& multi_ack
== 1)
337 die(_("--stateless-rpc requires multi_ack_detailed"));
339 for_each_ref(clear_marks
, NULL
);
342 for_each_ref(rev_list_insert_ref_oid
, NULL
);
343 for_each_cached_alternate(insert_one_alternate_object
);
346 for ( ; refs
; refs
= refs
->next
) {
347 struct object_id
*remote
= &refs
->old_oid
;
348 const char *remote_hex
;
352 * If that object is complete (i.e. it is an ancestor of a
353 * local ref), we tell them we have it but do not have to
354 * tell them about its ancestors, which they already know
357 * We use lookup_object here because we are only
358 * interested in the case we *know* the object is
359 * reachable and we have already scanned it.
361 if (((o
= lookup_object(remote
->hash
)) != NULL
) &&
362 (o
->flags
& COMPLETE
)) {
366 remote_hex
= oid_to_hex(remote
);
368 struct strbuf c
= STRBUF_INIT
;
369 if (multi_ack
== 2) strbuf_addstr(&c
, " multi_ack_detailed");
370 if (multi_ack
== 1) strbuf_addstr(&c
, " multi_ack");
371 if (no_done
) strbuf_addstr(&c
, " no-done");
372 if (use_sideband
== 2) strbuf_addstr(&c
, " side-band-64k");
373 if (use_sideband
== 1) strbuf_addstr(&c
, " side-band");
374 if (args
->deepen_relative
) strbuf_addstr(&c
, " deepen-relative");
375 if (args
->use_thin_pack
) strbuf_addstr(&c
, " thin-pack");
376 if (args
->no_progress
) strbuf_addstr(&c
, " no-progress");
377 if (args
->include_tag
) strbuf_addstr(&c
, " include-tag");
378 if (prefer_ofs_delta
) strbuf_addstr(&c
, " ofs-delta");
379 if (deepen_since_ok
) strbuf_addstr(&c
, " deepen-since");
380 if (deepen_not_ok
) strbuf_addstr(&c
, " deepen-not");
381 if (agent_supported
) strbuf_addf(&c
, " agent=%s",
382 git_user_agent_sanitized());
383 if (args
->filter_options
.choice
)
384 strbuf_addstr(&c
, " filter");
385 packet_buf_write(&req_buf
, "want %s%s\n", remote_hex
, c
.buf
);
388 packet_buf_write(&req_buf
, "want %s\n", remote_hex
);
393 strbuf_release(&req_buf
);
398 if (is_repository_shallow())
399 write_shallow_commits(&req_buf
, 1, NULL
);
401 packet_buf_write(&req_buf
, "deepen %d", args
->depth
);
402 if (args
->deepen_since
) {
403 timestamp_t max_age
= approxidate(args
->deepen_since
);
404 packet_buf_write(&req_buf
, "deepen-since %"PRItime
, max_age
);
406 if (args
->deepen_not
) {
408 for (i
= 0; i
< args
->deepen_not
->nr
; i
++) {
409 struct string_list_item
*s
= args
->deepen_not
->items
+ i
;
410 packet_buf_write(&req_buf
, "deepen-not %s", s
->string
);
413 if (server_supports_filtering
&& args
->filter_options
.choice
)
414 packet_buf_write(&req_buf
, "filter %s",
415 args
->filter_options
.filter_spec
);
416 packet_buf_flush(&req_buf
);
417 state_len
= req_buf
.len
;
422 struct object_id oid
;
424 send_request(args
, fd
[1], &req_buf
);
425 while ((line
= packet_read_line(fd
[0], NULL
))) {
426 if (skip_prefix(line
, "shallow ", &arg
)) {
427 if (get_oid_hex(arg
, &oid
))
428 die(_("invalid shallow line: %s"), line
);
429 register_shallow(&oid
);
432 if (skip_prefix(line
, "unshallow ", &arg
)) {
433 if (get_oid_hex(arg
, &oid
))
434 die(_("invalid unshallow line: %s"), line
);
435 if (!lookup_object(oid
.hash
))
436 die(_("object not found: %s"), line
);
437 /* make sure that it is parsed as shallow */
438 if (!parse_object(&oid
))
439 die(_("error in object: %s"), line
);
440 if (unregister_shallow(&oid
))
441 die(_("no shallow found: %s"), line
);
444 die(_("expected shallow/unshallow, got %s"), line
);
446 } else if (!args
->stateless_rpc
)
447 send_request(args
, fd
[1], &req_buf
);
449 if (!args
->stateless_rpc
) {
450 /* If we aren't using the stateless-rpc interface
451 * we don't need to retain the headers.
453 strbuf_setlen(&req_buf
, 0);
459 if (args
->no_dependents
)
461 while ((oid
= get_rev())) {
462 packet_buf_write(&req_buf
, "have %s\n", oid_to_hex(oid
));
463 print_verbose(args
, "have %s", oid_to_hex(oid
));
465 if (flush_at
<= ++count
) {
468 packet_buf_flush(&req_buf
);
469 send_request(args
, fd
[1], &req_buf
);
470 strbuf_setlen(&req_buf
, state_len
);
472 flush_at
= next_flush(args
, count
);
475 * We keep one window "ahead" of the other side, and
476 * will wait for an ACK only on the next one
478 if (!args
->stateless_rpc
&& count
== INITIAL_FLUSH
)
481 consume_shallow_list(args
, fd
[0]);
483 ack
= get_ack(fd
[0], result_oid
);
485 print_verbose(args
, _("got %s %d %s"), "ack",
486 ack
, oid_to_hex(result_oid
));
496 struct commit
*commit
=
497 lookup_commit(result_oid
);
499 die(_("invalid commit %s"), oid_to_hex(result_oid
));
500 if (args
->stateless_rpc
502 && !(commit
->object
.flags
& COMMON
)) {
503 /* We need to replay the have for this object
504 * on the next RPC request so the peer knows
505 * it is in common with us.
507 const char *hex
= oid_to_hex(result_oid
);
508 packet_buf_write(&req_buf
, "have %s\n", hex
);
509 state_len
= req_buf
.len
;
511 * Reset in_vain because an ack
512 * for this commit has not been
516 } else if (!args
->stateless_rpc
517 || ack
!= ACK_common
)
519 mark_common(commit
, 0, 1);
522 if (ack
== ACK_ready
) {
523 clear_prio_queue(&rev_list
);
531 if (got_continue
&& MAX_IN_VAIN
< in_vain
) {
532 print_verbose(args
, _("giving up"));
538 if (!got_ready
|| !no_done
) {
539 packet_buf_write(&req_buf
, "done\n");
540 send_request(args
, fd
[1], &req_buf
);
542 print_verbose(args
, _("done"));
547 strbuf_release(&req_buf
);
549 if (!got_ready
|| !no_done
)
550 consume_shallow_list(args
, fd
[0]);
551 while (flushes
|| multi_ack
) {
552 int ack
= get_ack(fd
[0], result_oid
);
554 print_verbose(args
, _("got %s (%d) %s"), "ack",
555 ack
, oid_to_hex(result_oid
));
563 /* it is no error to fetch into a completely empty repo */
564 return count
? retval
: 0;
567 static struct commit_list
*complete
;
569 static int mark_complete(const struct object_id
*oid
)
571 struct object
*o
= parse_object(oid
);
573 while (o
&& o
->type
== OBJ_TAG
) {
574 struct tag
*t
= (struct tag
*) o
;
576 break; /* broken repository */
577 o
->flags
|= COMPLETE
;
578 o
= parse_object(&t
->tagged
->oid
);
580 if (o
&& o
->type
== OBJ_COMMIT
) {
581 struct commit
*commit
= (struct commit
*)o
;
582 if (!(commit
->object
.flags
& COMPLETE
)) {
583 commit
->object
.flags
|= COMPLETE
;
584 commit_list_insert(commit
, &complete
);
/* each_ref_fn adapter for mark_complete(); flag/cb_data unused. */
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
596 static void mark_recent_complete_commits(struct fetch_pack_args
*args
,
599 while (complete
&& cutoff
<= complete
->item
->date
) {
600 print_verbose(args
, _("Marking %s as complete"),
601 oid_to_hex(&complete
->item
->object
.oid
));
602 pop_most_recent_commit(&complete
, COMPLETE
);
606 static void add_refs_to_oidset(struct oidset
*oids
, struct ref
*refs
)
608 for (; refs
; refs
= refs
->next
)
609 oidset_insert(oids
, &refs
->old_oid
);
612 static int tip_oids_contain(struct oidset
*tip_oids
,
613 struct ref
*unmatched
, struct ref
*newlist
,
614 const struct object_id
*id
)
617 * Note that this only looks at the ref lists the first time it's
618 * called. This works out in filter_refs() because even though it may
619 * add to "newlist" between calls, the additions will always be for
620 * oids that are already in the set.
622 if (!tip_oids
->map
.map
.tablesize
) {
623 add_refs_to_oidset(tip_oids
, unmatched
);
624 add_refs_to_oidset(tip_oids
, newlist
);
626 return oidset_contains(tip_oids
, id
);
629 static void filter_refs(struct fetch_pack_args
*args
,
631 struct ref
**sought
, int nr_sought
)
633 struct ref
*newlist
= NULL
;
634 struct ref
**newtail
= &newlist
;
635 struct ref
*unmatched
= NULL
;
636 struct ref
*ref
, *next
;
637 struct oidset tip_oids
= OIDSET_INIT
;
641 for (ref
= *refs
; ref
; ref
= next
) {
645 if (starts_with(ref
->name
, "refs/") &&
646 check_refname_format(ref
->name
, 0))
649 while (i
< nr_sought
) {
650 int cmp
= strcmp(ref
->name
, sought
[i
]->name
);
652 break; /* definitely do not have it */
654 keep
= 1; /* definitely have it */
655 sought
[i
]->match_status
= REF_MATCHED
;
661 if (!keep
&& args
->fetch_all
&&
662 (!args
->deepen
|| !starts_with(ref
->name
, "refs/tags/")))
668 newtail
= &ref
->next
;
670 ref
->next
= unmatched
;
675 /* Append unmatched requests to the list */
676 for (i
= 0; i
< nr_sought
; i
++) {
677 struct object_id oid
;
681 if (ref
->match_status
!= REF_NOT_MATCHED
)
683 if (parse_oid_hex(ref
->name
, &oid
, &p
) ||
685 oidcmp(&oid
, &ref
->old_oid
))
688 if ((allow_unadvertised_object_request
&
689 (ALLOW_TIP_SHA1
| ALLOW_REACHABLE_SHA1
)) ||
690 tip_oids_contain(&tip_oids
, unmatched
, newlist
,
692 ref
->match_status
= REF_MATCHED
;
693 *newtail
= copy_ref(ref
);
694 newtail
= &(*newtail
)->next
;
696 ref
->match_status
= REF_UNADVERTISED_NOT_ALLOWED
;
700 oidset_clear(&tip_oids
);
701 for (ref
= unmatched
; ref
; ref
= next
) {
709 static void mark_alternate_complete(struct object
*obj
)
711 mark_complete(&obj
->oid
);
714 static int everything_local(struct fetch_pack_args
*args
,
716 struct ref
**sought
, int nr_sought
)
720 timestamp_t cutoff
= 0;
722 save_commit_buffer
= 0;
724 for (ref
= *refs
; ref
; ref
= ref
->next
) {
727 if (!has_object_file(&ref
->old_oid
))
730 o
= parse_object(&ref
->old_oid
);
734 /* We already have it -- which may mean that we were
735 * in sync with the other side at some time after
736 * that (it is OK if we guess wrong here).
738 if (o
->type
== OBJ_COMMIT
) {
739 struct commit
*commit
= (struct commit
*)o
;
740 if (!cutoff
|| cutoff
< commit
->date
)
741 cutoff
= commit
->date
;
745 if (!args
->no_dependents
) {
747 for_each_ref(mark_complete_oid
, NULL
);
748 for_each_cached_alternate(mark_alternate_complete
);
749 commit_list_sort_by_date(&complete
);
751 mark_recent_complete_commits(args
, cutoff
);
755 * Mark all complete remote refs as common refs.
756 * Don't mark them common yet; the server has to be told so first.
758 for (ref
= *refs
; ref
; ref
= ref
->next
) {
759 struct object
*o
= deref_tag(lookup_object(ref
->old_oid
.hash
),
762 if (!o
|| o
->type
!= OBJ_COMMIT
|| !(o
->flags
& COMPLETE
))
765 if (!(o
->flags
& SEEN
)) {
766 rev_list_push((struct commit
*)o
, COMMON_REF
| SEEN
);
768 mark_common((struct commit
*)o
, 1, 1);
773 filter_refs(args
, refs
, sought
, nr_sought
);
775 for (retval
= 1, ref
= *refs
; ref
; ref
= ref
->next
) {
776 const struct object_id
*remote
= &ref
->old_oid
;
779 o
= lookup_object(remote
->hash
);
780 if (!o
|| !(o
->flags
& COMPLETE
)) {
782 print_verbose(args
, "want %s (%s)", oid_to_hex(remote
),
786 print_verbose(args
, _("already have %s (%s)"), oid_to_hex(remote
),
/*
 * Async callback: strip the sideband framing from upload-pack's
 * output (band#2 goes to stderr inside recv_sideband) and feed the
 * pack data to "out".
 */
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
802 static int get_pack(struct fetch_pack_args
*args
,
803 int xd
[2], char **pack_lockfile
)
806 int do_keep
= args
->keep_pack
;
807 const char *cmd_name
;
808 struct pack_header header
;
810 struct child_process cmd
= CHILD_PROCESS_INIT
;
813 memset(&demux
, 0, sizeof(demux
));
815 /* xd[] is talking with upload-pack; subprocess reads from
816 * xd[0], spits out band#2 to stderr, and feeds us band#1
817 * through demux->out.
819 demux
.proc
= sideband_demux
;
822 demux
.isolate_sigpipe
= 1;
823 if (start_async(&demux
))
824 die(_("fetch-pack: unable to fork off sideband demultiplexer"));
829 if (!args
->keep_pack
&& unpack_limit
) {
831 if (read_pack_header(demux
.out
, &header
))
832 die(_("protocol error: bad pack header"));
834 if (ntohl(header
.hdr_entries
) < unpack_limit
)
840 if (alternate_shallow_file
) {
841 argv_array_push(&cmd
.args
, "--shallow-file");
842 argv_array_push(&cmd
.args
, alternate_shallow_file
);
845 if (do_keep
|| args
->from_promisor
) {
848 cmd_name
= "index-pack";
849 argv_array_push(&cmd
.args
, cmd_name
);
850 argv_array_push(&cmd
.args
, "--stdin");
851 if (!args
->quiet
&& !args
->no_progress
)
852 argv_array_push(&cmd
.args
, "-v");
853 if (args
->use_thin_pack
)
854 argv_array_push(&cmd
.args
, "--fix-thin");
855 if (do_keep
&& (args
->lock_pack
|| unpack_limit
)) {
856 char hostname
[HOST_NAME_MAX
+ 1];
857 if (xgethostname(hostname
, sizeof(hostname
)))
858 xsnprintf(hostname
, sizeof(hostname
), "localhost");
859 argv_array_pushf(&cmd
.args
,
860 "--keep=fetch-pack %"PRIuMAX
" on %s",
861 (uintmax_t)getpid(), hostname
);
863 if (args
->check_self_contained_and_connected
)
864 argv_array_push(&cmd
.args
, "--check-self-contained-and-connected");
865 if (args
->from_promisor
)
866 argv_array_push(&cmd
.args
, "--promisor");
869 cmd_name
= "unpack-objects";
870 argv_array_push(&cmd
.args
, cmd_name
);
871 if (args
->quiet
|| args
->no_progress
)
872 argv_array_push(&cmd
.args
, "-q");
873 args
->check_self_contained_and_connected
= 0;
877 argv_array_pushf(&cmd
.args
, "--pack_header=%"PRIu32
",%"PRIu32
,
878 ntohl(header
.hdr_version
),
879 ntohl(header
.hdr_entries
));
880 if (fetch_fsck_objects
>= 0
882 : transfer_fsck_objects
>= 0
883 ? transfer_fsck_objects
885 argv_array_push(&cmd
.args
, "--strict");
889 if (start_command(&cmd
))
890 die(_("fetch-pack: unable to fork off %s"), cmd_name
);
891 if (do_keep
&& pack_lockfile
) {
892 *pack_lockfile
= index_pack_lockfile(cmd
.out
);
897 /* Closed by start_command() */
900 ret
= finish_command(&cmd
);
901 if (!ret
|| (args
->check_self_contained_and_connected
&& ret
== 1))
902 args
->self_contained_and_connected
=
903 args
->check_self_contained_and_connected
&&
906 die(_("%s failed"), cmd_name
);
907 if (use_sideband
&& finish_async(&demux
))
908 die(_("error in sideband demultiplexer"));
912 static int cmp_ref_by_name(const void *a_
, const void *b_
)
914 const struct ref
*a
= *((const struct ref
**)a_
);
915 const struct ref
*b
= *((const struct ref
**)b_
);
916 return strcmp(a
->name
, b
->name
);
919 static struct ref
*do_fetch_pack(struct fetch_pack_args
*args
,
921 const struct ref
*orig_ref
,
922 struct ref
**sought
, int nr_sought
,
923 struct shallow_info
*si
,
924 char **pack_lockfile
)
926 struct ref
*ref
= copy_ref_list(orig_ref
);
927 struct object_id oid
;
928 const char *agent_feature
;
931 sort_ref_list(&ref
, ref_compare_name
);
932 QSORT(sought
, nr_sought
, cmp_ref_by_name
);
934 if ((args
->depth
> 0 || is_repository_shallow()) && !server_supports("shallow"))
935 die(_("Server does not support shallow clients"));
936 if (args
->depth
> 0 || args
->deepen_since
|| args
->deepen_not
)
938 if (server_supports("multi_ack_detailed")) {
939 print_verbose(args
, _("Server supports multi_ack_detailed"));
941 if (server_supports("no-done")) {
942 print_verbose(args
, _("Server supports no-done"));
943 if (args
->stateless_rpc
)
947 else if (server_supports("multi_ack")) {
948 print_verbose(args
, _("Server supports multi_ack"));
951 if (server_supports("side-band-64k")) {
952 print_verbose(args
, _("Server supports side-band-64k"));
955 else if (server_supports("side-band")) {
956 print_verbose(args
, _("Server supports side-band"));
959 if (server_supports("allow-tip-sha1-in-want")) {
960 print_verbose(args
, _("Server supports allow-tip-sha1-in-want"));
961 allow_unadvertised_object_request
|= ALLOW_TIP_SHA1
;
963 if (server_supports("allow-reachable-sha1-in-want")) {
964 print_verbose(args
, _("Server supports allow-reachable-sha1-in-want"));
965 allow_unadvertised_object_request
|= ALLOW_REACHABLE_SHA1
;
967 if (!server_supports("thin-pack"))
968 args
->use_thin_pack
= 0;
969 if (!server_supports("no-progress"))
970 args
->no_progress
= 0;
971 if (!server_supports("include-tag"))
972 args
->include_tag
= 0;
973 if (server_supports("ofs-delta"))
974 print_verbose(args
, _("Server supports ofs-delta"));
976 prefer_ofs_delta
= 0;
978 if (server_supports("filter")) {
979 server_supports_filtering
= 1;
980 print_verbose(args
, _("Server supports filter"));
981 } else if (args
->filter_options
.choice
) {
982 warning("filtering not recognized by server, ignoring");
985 if ((agent_feature
= server_feature_value("agent", &agent_len
))) {
988 print_verbose(args
, _("Server version is %.*s"),
989 agent_len
, agent_feature
);
991 if (server_supports("deepen-since"))
993 else if (args
->deepen_since
)
994 die(_("Server does not support --shallow-since"));
995 if (server_supports("deepen-not"))
997 else if (args
->deepen_not
)
998 die(_("Server does not support --shallow-exclude"));
999 if (!server_supports("deepen-relative") && args
->deepen_relative
)
1000 die(_("Server does not support --deepen"));
1002 if (everything_local(args
, &ref
, sought
, nr_sought
)) {
1003 packet_flush(fd
[1]);
1006 if (find_common(args
, fd
, &oid
, ref
) < 0)
1007 if (!args
->keep_pack
)
1008 /* When cloning, it is not unusual to have
1011 warning(_("no common commits"));
1013 if (args
->stateless_rpc
)
1014 packet_flush(fd
[1]);
1016 setup_alternate_shallow(&shallow_lock
, &alternate_shallow_file
,
1018 else if (si
->nr_ours
|| si
->nr_theirs
)
1019 alternate_shallow_file
= setup_temporary_shallow(si
->shallow
);
1021 alternate_shallow_file
= NULL
;
1022 if (get_pack(args
, fd
, pack_lockfile
))
1023 die(_("git fetch-pack: fetch failed."));
1029 static void fetch_pack_config(void)
1031 git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit
);
1032 git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit
);
1033 git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta
);
1034 git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects
);
1035 git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects
);
1037 git_config(git_default_config
, NULL
);
1040 static void fetch_pack_setup(void)
1042 static int did_setup
;
1045 fetch_pack_config();
1046 if (0 <= transfer_unpack_limit
)
1047 unpack_limit
= transfer_unpack_limit
;
1048 else if (0 <= fetch_unpack_limit
)
1049 unpack_limit
= fetch_unpack_limit
;
1053 static int remove_duplicates_in_refs(struct ref
**ref
, int nr
)
1055 struct string_list names
= STRING_LIST_INIT_NODUP
;
1058 for (src
= dst
= 0; src
< nr
; src
++) {
1059 struct string_list_item
*item
;
1060 item
= string_list_insert(&names
, ref
[src
]->name
);
1062 continue; /* already have it */
1063 item
->util
= ref
[src
];
1065 ref
[dst
] = ref
[src
];
1068 for (src
= dst
; src
< nr
; src
++)
1070 string_list_clear(&names
, 0);
1074 static void update_shallow(struct fetch_pack_args
*args
,
1075 struct ref
**sought
, int nr_sought
,
1076 struct shallow_info
*si
)
1078 struct oid_array ref
= OID_ARRAY_INIT
;
1082 if (args
->deepen
&& alternate_shallow_file
) {
1083 if (*alternate_shallow_file
== '\0') { /* --unshallow */
1084 unlink_or_warn(git_path_shallow());
1085 rollback_lock_file(&shallow_lock
);
1087 commit_lock_file(&shallow_lock
);
1091 if (!si
->shallow
|| !si
->shallow
->nr
)
1094 if (args
->cloning
) {
1096 * remote is shallow, but this is a clone, there are
1097 * no objects in repo to worry about. Accept any
1098 * shallow points that exist in the pack (iow in repo
1099 * after get_pack() and reprepare_packed_git())
1101 struct oid_array extra
= OID_ARRAY_INIT
;
1102 struct object_id
*oid
= si
->shallow
->oid
;
1103 for (i
= 0; i
< si
->shallow
->nr
; i
++)
1104 if (has_object_file(&oid
[i
]))
1105 oid_array_append(&extra
, &oid
[i
]);
1107 setup_alternate_shallow(&shallow_lock
,
1108 &alternate_shallow_file
,
1110 commit_lock_file(&shallow_lock
);
1112 oid_array_clear(&extra
);
1116 if (!si
->nr_ours
&& !si
->nr_theirs
)
1119 remove_nonexistent_theirs_shallow(si
);
1120 if (!si
->nr_ours
&& !si
->nr_theirs
)
1122 for (i
= 0; i
< nr_sought
; i
++)
1123 oid_array_append(&ref
, &sought
[i
]->old_oid
);
1126 if (args
->update_shallow
) {
1128 * remote is also shallow, .git/shallow may be updated
1129 * so all refs can be accepted. Make sure we only add
1130 * shallow roots that are actually reachable from new
1133 struct oid_array extra
= OID_ARRAY_INIT
;
1134 struct object_id
*oid
= si
->shallow
->oid
;
1135 assign_shallow_commits_to_refs(si
, NULL
, NULL
);
1136 if (!si
->nr_ours
&& !si
->nr_theirs
) {
1137 oid_array_clear(&ref
);
1140 for (i
= 0; i
< si
->nr_ours
; i
++)
1141 oid_array_append(&extra
, &oid
[si
->ours
[i
]]);
1142 for (i
= 0; i
< si
->nr_theirs
; i
++)
1143 oid_array_append(&extra
, &oid
[si
->theirs
[i
]]);
1144 setup_alternate_shallow(&shallow_lock
,
1145 &alternate_shallow_file
,
1147 commit_lock_file(&shallow_lock
);
1148 oid_array_clear(&extra
);
1149 oid_array_clear(&ref
);
1154 * remote is also shallow, check what ref is safe to update
1155 * without updating .git/shallow
1157 status
= xcalloc(nr_sought
, sizeof(*status
));
1158 assign_shallow_commits_to_refs(si
, NULL
, status
);
1159 if (si
->nr_ours
|| si
->nr_theirs
) {
1160 for (i
= 0; i
< nr_sought
; i
++)
1162 sought
[i
]->status
= REF_STATUS_REJECT_SHALLOW
;
1165 oid_array_clear(&ref
);
1168 struct ref
*fetch_pack(struct fetch_pack_args
*args
,
1169 int fd
[], struct child_process
*conn
,
1170 const struct ref
*ref
,
1172 struct ref
**sought
, int nr_sought
,
1173 struct oid_array
*shallow
,
1174 char **pack_lockfile
)
1176 struct ref
*ref_cpy
;
1177 struct shallow_info si
;
1181 nr_sought
= remove_duplicates_in_refs(sought
, nr_sought
);
1184 packet_flush(fd
[1]);
1185 die(_("no matching remote head"));
1187 prepare_shallow_info(&si
, shallow
);
1188 ref_cpy
= do_fetch_pack(args
, fd
, ref
, sought
, nr_sought
,
1189 &si
, pack_lockfile
);
1190 reprepare_packed_git();
1191 update_shallow(args
, sought
, nr_sought
, &si
);
1192 clear_shallow_info(&si
);
1196 int report_unmatched_refs(struct ref
**sought
, int nr_sought
)
1200 for (i
= 0; i
< nr_sought
; i
++) {
1203 switch (sought
[i
]->match_status
) {
1206 case REF_NOT_MATCHED
:
1207 error(_("no such remote ref %s"), sought
[i
]->name
);
1209 case REF_UNADVERTISED_NOT_ALLOWED
:
1210 error(_("Server does not allow request for unadvertised object %s"),