#include "repository.h"
#include "fetch-pack.h"
#include "run-command.h"
#include "transport.h"
#include "sha1-array.h"
#include "fetch-negotiator.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
static char *negotiation_algorithm;
/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define ALTERNATE	(1U << 1)

/*
 * After sending this many "have"s if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static int multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02

static unsigned int allow_unadvertised_object_request;
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};
static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}
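
/*
 * Refs in alternate object stores are enumerated once and cached, so the
 * callers below can walk them repeatedly without re-running
 * for_each_alternate_ref().
 */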
static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
				      void (*cb)(struct fetch_negotiator *,
						 struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(negotiator, cache.items[i]);
}
static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
			       const char *refname,
			       const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		negotiator->add_tip(negotiator, (struct commit *)o);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(cb_data, refname, oid);
}
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else {
		write_or_die(fd, buf->buf, buf->len);
	}
}
static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
					struct object *obj)
{
	rev_list_insert_ref(negotiator, NULL, &obj->oid);
}
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
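
/*
 * Schedule of "have" flushes: start at INITIAL_FLUSH and let next_flush()
 * grow the window each round, so small negotiations stay cheap while
 * large ones keep the pipe (or the stateless-rpc request) reasonably full.
 */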
static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}
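
/*
 * Seed the negotiator with the commits we may advertise as "have"s:
 * either every local ref tip, or only the caller-supplied
 * negotiation tips.
 */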
static void mark_tips(struct fetch_negotiator *negotiator,
		      const struct oid_array *negotiation_tips)
{
	int i;

	if (!negotiation_tips) {
		for_each_ref(rev_list_insert_ref_oid, negotiator);
		return;
	}

	for (i = 0; i < negotiation_tips->nr; i++)
		rev_list_insert_ref(negotiator, NULL,
				    &negotiation_tips->oid[i]);
}
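
/*
 * Negotiate with the server over protocol v0/v1: send "want" lines (the
 * first one carrying the capability list built below), then stream
 * "have" lines from the negotiator, flushing on the schedule computed by
 * next_flush() and reading ACK/NAK answers until the server is "ready"
 * or MAX_IN_VAIN "have"s go unacknowledged.  A typical exchange:
 *
 *   C: want <oid> multi_ack_detailed side-band-64k ... (flush)
 *   C: have <oid> ... (flush)
 *   S: ACK <oid> common / ACK <oid> ready / NAK
 *   C: done
 *
 * The caller treats a negative return as "no common commits".
 */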
static int find_common(struct fetch_negotiator *negotiator,
		       struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));

	mark_tips(negotiator, args->negotiation_tips);
	for_each_cached_alternate(negotiator, insert_one_alternate_object);
	fetching = 0;
	for ( ; refs; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack)   strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)     strbuf_addstr(&c, " no-progress");
			if (args->include_tag)     strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)      strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)       strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)         strbuf_addstr(&c, " deepen-not");
			if (agent_supported)       strbuf_addf(&c, " agent=%s",
							       git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}
	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;
	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}
	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;

	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);
			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					int was_common;

					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					was_common = negotiator->ack(negotiator, commit);
					if (args->stateless_rpc
					    && ack == ACK_common
					    && !was_common) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready)
						got_ready = 1;
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
static struct commit_list *complete;
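
/*
 * mark_complete() peels tags and marks the underlying commit COMPLETE,
 * collecting it on the "complete" list so that
 * mark_recent_complete_commits() can later pop the most recent ones.
 */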
static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}
static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
static void mark_alternate_complete(struct fetch_negotiator *unused,
				    struct object *obj)
{
	mark_complete(&obj->oid);
}
struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};
/*
 * If the number of refs is not larger than the number of loose objects,
 * this function stops inserting.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
				    const char *path,
				    void *data)
{
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
		return 1;

	iter->refs = iter->refs->next;
	return 0;
}
/*
 * Mark recent commits available locally and reachable from a local ref as
 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
 * thus do not need COMMON_REF marks).
 *
 * The cutoff time for recency is determined by this heuristic: it is the
 * earliest commit time of the objects in refs that are commits and that we know
 * the commit time of.
 */
static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
					 struct fetch_pack_args *args,
					 struct ref **refs)
{
	struct ref *ref;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects or know refs are not so many. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(NULL, mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			negotiator->known_common(negotiator,
						 (struct commit *)o);
		}
	}

	save_commit_buffer = old_save_commit_buffer;
}
/*
 * Returns 1 if every object pointed to by the given remote refs is available
 * locally and reachable from a local ref, and 0 otherwise.
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs)
{
	struct ref *ref;
	int retval;

	for (retval = 1, ref = *refs; ref; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	return retval;
}
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
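
/*
 * Receive the packfile over xd[] (demultiplexing the sideband if in use)
 * and feed it to index-pack or unpack-objects, depending on keep_pack,
 * unpack_limit and the object count in the pack header.
 */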
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	} else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	} else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
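
/*
 * Protocol v0/v1 driver: parse the capabilities advertised with the
 * initial ref listing, negotiate via find_common(), then receive the
 * pack with get_pack().
 */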
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(&negotiator, negotiation_algorithm);

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	mark_complete_and_common_ref(&negotiator, args, &ref);
	filter_refs(args, &ref, sought, nr_sought);
	if (everything_local(args, &ref)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(&negotiator, args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	negotiator.release(&negotiator);
	return ref;
}
static void add_shallow_requests(struct strbuf *req_buf,
				 const struct fetch_pack_args *args)
{
	if (is_repository_shallow())
		write_shallow_commits(req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(req_buf, "deepen-not %s", s->string);
		}
	}
}
*wants
, struct strbuf
*req_buf
)
1034 for ( ; wants
; wants
= wants
->next
) {
1035 const struct object_id
*remote
= &wants
->old_oid
;
1036 const char *remote_hex
;
1040 * If that object is complete (i.e. it is an ancestor of a
1041 * local ref), we tell them we have it but do not have to
1042 * tell them about its ancestors, which they already know
1045 * We use lookup_object here because we are only
1046 * interested in the case we *know* the object is
1047 * reachable and we have already scanned it.
1049 if (((o
= lookup_object(remote
->hash
)) != NULL
) &&
1050 (o
->flags
& COMPLETE
)) {
1054 remote_hex
= oid_to_hex(remote
);
1055 packet_buf_write(req_buf
, "want %s\n", remote_hex
);
1059 static void add_common(struct strbuf
*req_buf
, struct oidset
*common
)
1061 struct oidset_iter iter
;
1062 const struct object_id
*oid
;
1063 oidset_iter_init(common
, &iter
);
1065 while ((oid
= oidset_iter_next(&iter
))) {
1066 packet_buf_write(req_buf
, "have %s\n", oid_to_hex(oid
));
1070 static int add_haves(struct fetch_negotiator
*negotiator
,
1071 struct strbuf
*req_buf
,
1072 int *haves_to_send
, int *in_vain
)
1075 int haves_added
= 0;
1076 const struct object_id
*oid
;
1078 while ((oid
= negotiator
->next(negotiator
))) {
1079 packet_buf_write(req_buf
, "have %s\n", oid_to_hex(oid
));
1080 if (++haves_added
>= *haves_to_send
)
1084 *in_vain
+= haves_added
;
1085 if (!haves_added
|| *in_vain
>= MAX_IN_VAIN
) {
1087 packet_buf_write(req_buf
, "done\n");
1091 /* Increase haves to send on next round */
1092 *haves_to_send
= next_flush(1, *haves_to_send
);
static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
			      const struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain)
{
	int ret = 0;
	struct strbuf req_buf = STRBUF_INIT;

	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		int i;
		for (i = 0; i < args->server_options->nr; i++)
			packet_write_fmt(fd_out, "server-option=%s",
					 args->server_options->items[i].string);
	}

	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");

	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow() || args->deepen)
		die(_("Server does not support shallow requests"));

	/* Add filter */
	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	/* Add wants */
	add_wants(wants, &req_buf);

	if (args->no_dependents) {
		packet_buf_write(&req_buf, "done");
		ret = 1;
	} else {
		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);

		/* Add initial haves */
		ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
	}

	/* Send request */
	packet_buf_flush(&req_buf);
	write_or_die(fd_out, req_buf.buf, req_buf.len);

	strbuf_release(&req_buf);
	return ret;
}
/*
 * Processes a section header in a server's response and checks if it matches
 * `section`. If the value of `peek` is 1, the header line will be peeked (and
 * not consumed); if 0, the line will be consumed and the function will die if
 * the section header doesn't match what was expected.
 */
static int process_section_header(struct packet_reader *reader,
				  const char *section, int peek)
{
	int ret;

	if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
		die("error reading section header '%s'", section);

	ret = !strcmp(reader->line, section);

	if (!peek) {
		if (!ret)
			die("expected '%s', received '%s'",
			    section, reader->line);
		packet_reader_read(reader);
	}

	return ret;
}
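
/*
 * Read the "acknowledgments" section of a v2 response, recording every
 * ACKed object id in "common" and feeding it to the negotiator.
 */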
static int process_acks(struct fetch_negotiator *negotiator,
			struct packet_reader *reader,
			struct oidset *common)
{
	int received_ready = 0;
	int received_ack = 0;

	process_section_header(reader, "acknowledgments", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;

		if (!strcmp(reader->line, "NAK"))
			continue;

		if (skip_prefix(reader->line, "ACK ", &arg)) {
			struct object_id oid;
			if (!get_oid_hex(arg, &oid)) {
				struct commit *commit;
				oidset_insert(common, &oid);
				commit = lookup_commit(&oid);
				negotiator->ack(negotiator, commit);
				received_ack = 1;
			}
			continue;
		}

		if (!strcmp(reader->line, "ready")) {
			received_ready = 1;
			continue;
		}

		die("unexpected acknowledgment line: '%s'", reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die("error processing acks: %d", reader->status);

	/* return 0 if no common, 1 if there are common, or 2 if ready */
	return received_ready ? 2 : (received_ack ? 1 : 0);
}
static void receive_shallow_info(struct fetch_pack_args *args,
				 struct packet_reader *reader)
{
	process_section_header(reader, "shallow-info", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;
		struct object_id oid;

		if (skip_prefix(reader->line, "shallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid shallow line: %s"), reader->line);
			register_shallow(&oid);
			continue;
		}
		if (skip_prefix(reader->line, "unshallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid unshallow line: %s"), reader->line);
			if (!lookup_object(oid.hash))
				die(_("object not found: %s"), reader->line);
			/* make sure that it is parsed as shallow */
			if (!parse_object(&oid))
				die(_("error in object: %s"), reader->line);
			if (unregister_shallow(&oid))
				die(_("no shallow found: %s"), reader->line);
			continue;
		}
		die(_("expected shallow/unshallow, got %s"), reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die("error processing shallow info: %d", reader->status);

	setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
}
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,
	FETCH_SEND_REQUEST,
	FETCH_PROCESS_ACKS,
	FETCH_GET_PACK,
	FETCH_DONE,
};
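
/*
 * Protocol v2 fetch is a small state machine: check what is already
 * available locally, send a request (wants + haves), process the
 * server's acknowledgments, and repeat until the server is ready to
 * send the packfile section.
 */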
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;
	int haves_to_send = INITIAL_FLUSH;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(&negotiator, negotiation_algorithm);
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* Filter 'ref' by 'sought' and those that aren't local */
			mark_complete_and_common_ref(&negotiator, args, &ref);
			filter_refs(args, &ref, sought, nr_sought);
			if (everything_local(args, &ref))
				state = FETCH_DONE;
			else
				state = FETCH_SEND_REQUEST;

			mark_tips(&negotiator, args->negotiation_tips);
			for_each_cached_alternate(&negotiator,
						  insert_one_alternate_object);
			break;
		case FETCH_SEND_REQUEST:
			if (send_fetch_request(&negotiator, fd[1], args, ref,
					       &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&negotiator, &reader, &common)) {
			case 2:
				state = FETCH_GET_PACK;
				break;
			case 1:
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	negotiator.release(&negotiator);
	oidset_clear(&common);
	return ref;
}
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
	git_config_get_string("fetch.negotiationalgorithm",
			      &negotiation_algorithm);

	git_config(git_default_config, NULL);
}
static void fetch_pack_setup(void)
{
	static int did_setup;

	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
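
/*
 * Decide what happens to .git/shallow after the fetch: commit or roll
 * back the shallow lock taken earlier, and when the remote itself is
 * shallow, only accept shallow roots that are reachable from the refs
 * being updated.
 */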
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
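
/*
 * Public entry point: dispatch to the protocol v2 or v0 implementation,
 * then re-scan the pack directory and apply any shallow updates.
 */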
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	if (version == protocol_v2)
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   pack_lockfile);
	else
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	reprepare_packed_git(the_repository);
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}
int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}