#include "fetch-pack.h"
#include "run-command.h"
#include "transport.h"
#include "prio-queue.h"
#include "sha1-array.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)

/*
 * After sending this many "have"s if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
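
/*
 * in_vain counts the "have"s sent since the last ACK that told us
 * something new; once it exceeds MAX_IN_VAIN (and at least one
 * "continue"/"common" ACK has been seen), find_common() stops walking
 * history and sends "done".
 */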
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02

static unsigned int allow_unadvertised_object_request;
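
/*
 * The two ALLOW_* bits are or'ed into allow_unadvertised_object_request
 * when the server advertises the corresponding capability (see
 * do_fetch_pack()); filter_refs() then accepts a request for a raw object
 * id roughly like this:
 *
 *	if (allow_unadvertised_object_request &
 *	    (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1))
 *		... treat the unadvertised object as requestable ...
 */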
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}
static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}
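
/*
 * rev_list is ordered by commit date (newest first, via
 * compare_commits_by_commit_date), so get_rev() below always proposes the
 * most recent commit that has not been handled yet as the next "have".
 */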
static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}
/*
 * This function marks a rev and its ancestors as common.
 * In some cases, it is desirable to mark only the ancestors (for example
 * when only the server does not yet know that they are common).
 */
static void mark_common(struct commit *commit,
			int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
			     parents;
			     parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
/*
 * Get the next rev to send, ignoring the common.
 */
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}
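
/*
 * Flag summary for the walk above: SEEN means a commit was pushed onto
 * rev_list, POPPED means it was taken off, and COMMON (propagated to
 * ancestors through mark_common()) means both ends are known to have it,
 * so neither it nor its ancestors need to be offered again.
 */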
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got EOF"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}
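
/*
 * The answers parsed here arrive as single pkt-lines, e.g.:
 *
 *	NAK
 *	ACK <oid>             (final ACK without multi_ack)
 *	ACK <oid> continue    (multi_ack)
 *	ACK <oid> common      (multi_ack_detailed)
 *	ACK <oid> ready       (multi_ack_detailed)
 *	ERR <message>
 */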
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}
static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}
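
/*
 * With INITIAL_FLUSH = 16 this yields a growing window of "have"s between
 * flushes: 16, 32, 64, 96, 128, ... on a bidirectional connection
 * (doubling up to PIPESAFE_FLUSH, then +32 per round), and 16, 32, ...,
 * 16384, then roughly +10% per round for stateless rpc, where every round
 * trip is a separate request and therefore much more expensive.
 */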
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));

	for_each_ref(clear_marks, NULL);
	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
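
/*
 * Taken together, the request stream that find_common() builds up in
 * req_buf looks like:
 *
 *	want <oid> multi_ack_detailed side-band-64k ofs-delta agent=...
 *	want <oid>
 *	...
 *	(optional shallow/deepen lines)
 *	0000
 *	have <oid>
 *	have <oid>
 *	...
 *	0000              (one flush per negotiation round)
 *	done
 *
 * with capabilities attached only to the first want line.
 */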
static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}

static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}

static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}

static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	timestamp_t cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file(&ref->old_oid))
			continue;

		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}
	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	} else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (args->lock_pack || unpack_limit) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
	} else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		argv_array_push(&cmd.args, "--strict");

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
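
/*
 * Whether the incoming pack is kept via "index-pack --stdin" or exploded
 * into loose objects by "unpack-objects" is controlled by unpack_limit,
 * i.e. the transfer.unpackLimit / fetch.unpackLimit configuration read in
 * fetch_pack_config(): packs with at least that many objects are kept,
 * smaller ones are unpacked.
 */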
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}
static void fetch_pack_setup(void)
{
	static int did_setup;

	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}
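
/*
 * Note the precedence above: transfer.unpacklimit, when set, wins over
 * fetch.unpacklimit, and the built-in default of 100 applies only when
 * neither is configured.  For example:
 *
 *	git config transfer.unpackLimit 1	# keep every fetched pack
 */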
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
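
/*
 * Example: asked for "master", "v1.0", "master", the sought[] array is
 * rewritten in place to "master", "v1.0", the trailing slot is set to
 * NULL, and 2 is returned as the new nr_sought.
 */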
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
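
/*
 * .git/shallow itself is only rewritten through shallow_lock: the deepen
 * and --unshallow cases commit or roll back the lock, a shallow clone or
 * --update-shallow fetch appends the new shallow roots, and in the default
 * case the file is left alone and refs that would require new shallow
 * roots are merely flagged with REF_STATUS_REJECT_SHALLOW.
 */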
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}
int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int ret = 0;
	int i;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}