2 #include "repository.h"
12 #include "fetch-pack.h"
14 #include "run-command.h"
16 #include "transport.h"
18 #include "sha1-array.h"
21 #include "object-store.h"
22 #include "connected.h"
23 #include "fetch-negotiator.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
static char *negotiation_algorithm;
static struct strbuf fsck_msg_types = STRBUF_INIT;
/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define ALTERNATE	(1U << 1)

/*
 * After sending this many "have"s if we do not get any new ACK , we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
static int multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
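/*
 * Bits ORed into allow_unadvertised_object_request (below) when the server
 * advertises the corresponding allow-*-sha1-in-want capability; see
 * do_fetch_pack().
 */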
static unsigned int allow_unadvertised_object_request;
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
struct alternate_object_cache {
	struct object **items;

static void cache_one_alternate(const struct object_id *oid,
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(the_repository, oid);
	if (!obj || (obj->flags & ALTERNATE))
	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
				      void (*cb)(struct fetch_negotiator *,
	static int initialized;
	static struct alternate_object_cache cache;
	for_each_alternate_ref(cache_one_alternate, &cache);
	for (i = 0; i < cache.nr; i++)
		cb(negotiator, cache.items[i]);
static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
			       const struct object_id *oid)
	struct object *o = deref_tag(the_repository,
				     parse_object(the_repository, oid),
	if (o && o->type == OBJ_COMMIT)
		negotiator->add_tip(negotiator, (struct commit *)o);

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
	return rev_list_insert_ref(cb_data, refname, oid);
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
			if (starts_with(line, "unshallow "))
			die(_("git fetch-pack: expected shallow list"));
static enum ack_type get_ack(int fd, struct object_id *result_oid)
	char *line = packet_read_line(fd, &len);
	die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			if (strstr(arg, "continue"))
			if (strstr(arg, "common"))
			if (strstr(arg, "ready"))
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
	write_or_die(fd, buf->buf, buf->len);

static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
	rev_list_insert_ref(negotiator, NULL, &obj->oid);
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
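/*
 * These constants drive the "have" flush schedule used by next_flush() and
 * find_common()/add_haves(): negotiation starts by sending INITIAL_FLUSH
 * haves, and each round the window grows, switching to slower growth once it
 * passes PIPESAFE_FLUSH (bidirectional transports) or LARGE_FLUSH (stateless
 * RPC).
 */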
static int next_flush(int stateless_rpc, int count)
	if (count < LARGE_FLUSH)
	count = count * 11 / 10;
	if (count < PIPESAFE_FLUSH)
	count += PIPESAFE_FLUSH;
static void mark_tips(struct fetch_negotiator *negotiator,
		      const struct oid_array *negotiation_tips)
	if (!negotiation_tips) {
		for_each_ref(rev_list_insert_ref_oid, negotiator);
	for (i = 0; i < negotiation_tips->nr; i++)
		rev_list_insert_ref(negotiator, NULL,
				    &negotiation_tips->oid[i]);
static int find_common(struct fetch_negotiator *negotiator,
		       struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));

	if (!args->no_dependents) {
		mark_tips(negotiator, args->negotiation_tips);
		for_each_cached_alternate(negotiator, insert_one_alternate_object);
	for ( ; refs; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!args->no_dependents &&
		    ((o = lookup_object(the_repository, remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {

		remote_hex = oid_to_hex(remote);
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
		packet_buf_write(&req_buf, "want %s\n", remote_hex);
	strbuf_release(&req_buf);

	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	if (args->deepen_not) {
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(the_repository, &oid);
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(the_repository, oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(the_repository, &oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
			die(_("expected shallow/unshallow, got %s"), line);
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
	if (args->no_dependents)

	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		if (flush_at <= ++count) {
			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)

			consume_shallow_list(args, fd[0]);
			ack = get_ack(fd[0], result_oid);
			print_verbose(args, _("got %s %d %s"), "ack",
				      ack, oid_to_hex(result_oid));
				struct commit *commit =
					lookup_commit(the_repository,
					die(_("invalid commit %s"), oid_to_hex(result_oid));
				was_common = negotiator->ack(negotiator, commit);
				if (args->stateless_rpc
					/* We need to replay the have for this object
					 * on the next RPC request so the peer knows
					 * it is in common with us.
					 */
					const char *hex = oid_to_hex(result_oid);
					packet_buf_write(&req_buf, "have %s\n", hex);
					state_len = req_buf.len;
					/*
					 * Reset in_vain because an ack
					 * for this commit has not been
					 */
				} else if (!args->stateless_rpc
					   || ack != ACK_common)
				if (ack == ACK_ready)
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));

	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	print_verbose(args, _("done"));
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		print_verbose(args, _("got %s (%d) %s"), "ack",
			      ack, oid_to_hex(result_oid));

	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
static struct commit_list *complete;
static int mark_complete(const struct object_id *oid)
	struct object *o = parse_object(the_repository, oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(the_repository, &t->tagged->oid);
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
	return mark_complete(oid);
static void mark_recent_complete_commits(struct fetch_pack_args *args,
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);

static int is_unmatched_ref(const struct ref *ref)
	struct object_id oid;
	return ref->match_status == REF_NOT_MATCHED &&
	       !parse_oid_hex(ref->name, &oid, &p) &&
	       oideq(&oid, &ref->old_oid);
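/*
 * Cross off the advertised refs against what the caller asked for: matched
 * refs (or all refs with fetch_all) are kept on the new list, and requests
 * for an unadvertised object id are appended only when the server allows
 * tip/reachable SHA-1 requests or the id happens to match an advertised tip.
 */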
static void filter_refs(struct fetch_pack_args *args,
			struct ref **sought, int nr_sought)
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int strict = !(allow_unadvertised_object_request &
		       (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));

	for (ref = *refs; ref; ref = next) {
		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
					break; /* definitely do not have it */
				keep = 1; /* definitely have it */
				sought[i]->match_status = REF_MATCHED;
		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			newtail = &ref->next;
			ref->next = unmatched;

	for (i = 0; i < nr_sought; i++) {
		if (!is_unmatched_ref(ref))
	add_refs_to_oidset(&tip_oids, unmatched);
	add_refs_to_oidset(&tip_oids, newlist);

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		if (!is_unmatched_ref(ref))
		if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
static void mark_alternate_complete(struct fetch_negotiator *unused,
	mark_complete(&obj->oid);

struct loose_object_iter {
	struct oidset *loose_object_set;
/*
 * If the number of refs is not larger than the number of loose objects,
 * this function stops inserting.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
	iter->refs = iter->refs->next;
/*
 * Mark recent commits available locally and reachable from a local ref as
 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
 * thus do not need COMMON_REF marks).
 *
 * The cutoff time for recency is determined by this heuristic: it is the
 * earliest commit time of the objects in refs that are commits and that we know
 * the commit time of.
 */
static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
					 struct fetch_pack_args *args,
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects or know refs are not so many. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		unsigned int flags = OBJECT_INFO_QUICK;
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		if (!has_object_file_with_flags(&ref->old_oid, flags))
		o = parse_object(the_repository, &ref->old_oid);
		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
	oidset_clear(&loose_oid_set);

	for_each_ref(mark_complete_oid, NULL);
	for_each_cached_alternate(NULL, mark_alternate_complete);
	commit_list_sort_by_date(&complete);
	mark_recent_complete_commits(args, cutoff);

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(the_repository,
					     lookup_object(the_repository,
		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
		negotiator->known_common(negotiator,
	save_commit_buffer = old_save_commit_buffer;
/*
 * Returns 1 if every object pointed to by the given remote refs is available
 * locally and reachable from a local ref, and 0 otherwise.
 */
static int everything_local(struct fetch_pack_args *args,
	for (retval = 1, ref = *refs; ref; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		o = lookup_object(the_repository, remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
static int sideband_demux(int in, int out, void *data)
	ret = recv_sideband("fetch-pack", xd[0], out);
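/*
 * Receive the packfile on xd[0] (optionally through the sideband demuxer
 * above) and hand it to index-pack or, when the pack is small enough to fall
 * under unpack_limit, to unpack-objects.
 */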
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	struct child_process cmd = CHILD_PROCESS_INIT;

	memset(&demux, 0, sizeof(demux));
	/* xd[] is talking with upload-pack; subprocess reads from
	 * xd[0], spits out band#2 to stderr, and feeds us band#1
	 * through demux->out.
	 */
	demux.proc = sideband_demux;
	demux.isolate_sigpipe = 1;
	if (start_async(&demux))
		die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	if (!args->keep_pack && unpack_limit) {
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		if (ntohl(header.hdr_entries) < unpack_limit)

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);

	if (do_keep || args->from_promisor) {
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;

	argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
			 ntohl(header.hdr_version),
			 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
			argv_array_pushf(&cmd.args, "--strict%s",

	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
	/* Closed by start_command() */

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
static int cmp_ref_by_name(const void *a_, const void *b_)
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(&negotiator, negotiation_algorithm);

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow(the_repository)) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
929 if (server_supports("multi_ack_detailed")) {
930 print_verbose(args
, _("Server supports multi_ack_detailed"));
932 if (server_supports("no-done")) {
933 print_verbose(args
, _("Server supports no-done"));
934 if (args
->stateless_rpc
)
938 else if (server_supports("multi_ack")) {
939 print_verbose(args
, _("Server supports multi_ack"));
942 if (server_supports("side-band-64k")) {
943 print_verbose(args
, _("Server supports side-band-64k"));
946 else if (server_supports("side-band")) {
947 print_verbose(args
, _("Server supports side-band"));
950 if (server_supports("allow-tip-sha1-in-want")) {
951 print_verbose(args
, _("Server supports allow-tip-sha1-in-want"));
952 allow_unadvertised_object_request
|= ALLOW_TIP_SHA1
;
954 if (server_supports("allow-reachable-sha1-in-want")) {
955 print_verbose(args
, _("Server supports allow-reachable-sha1-in-want"));
956 allow_unadvertised_object_request
|= ALLOW_REACHABLE_SHA1
;
958 if (!server_supports("thin-pack"))
959 args
->use_thin_pack
= 0;
960 if (!server_supports("no-progress"))
961 args
->no_progress
= 0;
962 if (!server_supports("include-tag"))
963 args
->include_tag
= 0;
964 if (server_supports("ofs-delta"))
965 print_verbose(args
, _("Server supports ofs-delta"));
967 prefer_ofs_delta
= 0;
969 if (server_supports("filter")) {
970 server_supports_filtering
= 1;
971 print_verbose(args
, _("Server supports filter"));
972 } else if (args
->filter_options
.choice
) {
973 warning("filtering not recognized by server, ignoring");
976 if ((agent_feature
= server_feature_value("agent", &agent_len
))) {
979 print_verbose(args
, _("Server version is %.*s"),
980 agent_len
, agent_feature
);
982 if (server_supports("deepen-since"))
984 else if (args
->deepen_since
)
985 die(_("Server does not support --shallow-since"));
986 if (server_supports("deepen-not"))
988 else if (args
->deepen_not
)
989 die(_("Server does not support --shallow-exclude"));
990 if (!server_supports("deepen-relative") && args
->deepen_relative
)
991 die(_("Server does not support --deepen"));
	if (!args->no_dependents) {
		mark_complete_and_common_ref(&negotiator, args, &ref);
		filter_refs(args, &ref, sought, nr_sought);
		if (everything_local(args, &ref)) {
		filter_refs(args, &ref, sought, nr_sought);
	if (find_common(&negotiator, args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

	negotiator.release(&negotiator);
static void add_shallow_requests(struct strbuf *req_buf,
				 const struct fetch_pack_args *args)
	if (is_repository_shallow(the_repository))
		write_shallow_commits(req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
	if (args->deepen_not) {
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(req_buf, "deepen-not %s", s->string);
static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
	int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);

	for ( ; wants; wants = wants->next) {
		const struct object_id *remote = &wants->old_oid;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!no_dependents &&
		    ((o = lookup_object(the_repository, remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {

		if (!use_ref_in_want || wants->exact_oid)
			packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
			packet_buf_write(req_buf, "want-ref %s\n", wants->name);
static void add_common(struct strbuf *req_buf, struct oidset *common)
	struct oidset_iter iter;
	const struct object_id *oid;
	oidset_iter_init(common, &iter);

	while ((oid = oidset_iter_next(&iter))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
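/*
 * Ask the negotiator for up to *haves_to_send commits and write them as
 * "have" lines; once there is nothing left to send, or *in_vain crosses
 * MAX_IN_VAIN, a "done" line is written and the function reports that
 * negotiation is finished.
 */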
static int add_haves(struct fetch_negotiator *negotiator,
		     struct strbuf *req_buf,
		     int *haves_to_send, int *in_vain)
	int haves_added = 0;
	const struct object_id *oid;

	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
		if (++haves_added >= *haves_to_send)

	*in_vain += haves_added;
	if (!haves_added || *in_vain >= MAX_IN_VAIN) {
		packet_buf_write(req_buf, "done\n");

	/* Increase haves to send on next round */
	*haves_to_send = next_flush(1, *haves_to_send);
static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
			      const struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain)
	struct strbuf req_buf = STRBUF_INIT;

	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		for (i = 0; i < args->server_options->nr; i++)
			packet_write_fmt(fd_out, "server-option=%s",
					 args->server_options->items[i].string);

	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");

	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow(the_repository) || args->deepen)
		die(_("Server does not support shallow requests"));

	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");

	add_wants(args->no_dependents, wants, &req_buf);

	if (args->no_dependents) {
		packet_buf_write(&req_buf, "done");

		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);

		/* Add initial haves */
		ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);

	packet_buf_flush(&req_buf);
	write_or_die(fd_out, req_buf.buf, req_buf.len);
	strbuf_release(&req_buf);
/*
 * Processes a section header in a server's response and checks if it matches
 * `section`. If the value of `peek` is 1, the header line will be peeked (and
 * not consumed); if 0, the line will be consumed and the function will die if
 * the section header doesn't match what was expected.
 */
static int process_section_header(struct packet_reader *reader,
				  const char *section, int peek)
	if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
		die(_("error reading section header '%s'"), section);

	ret = !strcmp(reader->line, section);
		die(_("expected '%s', received '%s'"),
		    section, reader->line);
	packet_reader_read(reader);
static int process_acks(struct fetch_negotiator *negotiator,
			struct packet_reader *reader,
			struct oidset *common)
	int received_ready = 0;
	int received_ack = 0;

	process_section_header(reader, "acknowledgments", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		if (!strcmp(reader->line, "NAK"))
		if (skip_prefix(reader->line, "ACK ", &arg)) {
			struct object_id oid;
			if (!get_oid_hex(arg, &oid)) {
				struct commit *commit;
				oidset_insert(common, &oid);
				commit = lookup_commit(the_repository, &oid);
				negotiator->ack(negotiator, commit);
		if (!strcmp(reader->line, "ready")) {
		die(_("unexpected acknowledgment line: '%s'"), reader->line);

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing acks: %d"), reader->status);

	/*
	 * If an "acknowledgments" section is sent, a packfile is sent if and
	 * only if "ready" was sent in this section. The other sections
	 * ("shallow-info" and "wanted-refs") are sent only if a packfile is
	 * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
	 */
	if (received_ready && reader->status != PACKET_READ_DELIM)
		die(_("expected packfile to be sent after 'ready'"));
	if (!received_ready && reader->status != PACKET_READ_FLUSH)
		die(_("expected no other sections to be sent after no 'ready'"));

	/* return 0 if no common, 1 if there are common, or 2 if ready */
	return received_ready ? 2 : (received_ack ? 1 : 0);
static void receive_shallow_info(struct fetch_pack_args *args,
				 struct packet_reader *reader)
	process_section_header(reader, "shallow-info", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		struct object_id oid;

		if (skip_prefix(reader->line, "shallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid shallow line: %s"), reader->line);
			register_shallow(the_repository, &oid);
		if (skip_prefix(reader->line, "unshallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid unshallow line: %s"), reader->line);
			if (!lookup_object(the_repository, oid.hash))
				die(_("object not found: %s"), reader->line);
			/* make sure that it is parsed as shallow */
			if (!parse_object(the_repository, &oid))
				die(_("error in object: %s"), reader->line);
			if (unregister_shallow(&oid))
				die(_("no shallow found: %s"), reader->line);
		die(_("expected shallow/unshallow, got %s"), reader->line);

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing shallow info: %d"), reader->status);

	setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
static void receive_wanted_refs(struct packet_reader *reader,
				struct ref **sought, int nr_sought)
	process_section_header(reader, "wanted-refs", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		struct object_id oid;

		if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
			die(_("expected wanted-ref, got '%s'"), reader->line);

		for (i = 0; i < nr_sought; i++) {
			if (!strcmp(end, sought[i]->name)) {
				oidcpy(&sought[i]->old_oid, &oid);
			die(_("unexpected wanted-ref: '%s'"), reader->line);

	if (reader->status != PACKET_READ_DELIM)
		die(_("error processing wanted refs: %d"), reader->status);
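/*
 * Protocol v2 fetch below is driven by a small state machine: it starts in
 * FETCH_CHECK_LOCAL, moves to FETCH_SEND_REQUEST, then FETCH_PROCESS_ACKS,
 * looping back to FETCH_SEND_REQUEST until the server says "ready", and
 * finishes with FETCH_GET_PACK and FETCH_DONE.
 */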
	FETCH_CHECK_LOCAL = 0,
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int haves_to_send = INITIAL_FLUSH;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(&negotiator, negotiation_algorithm);
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (!args->no_dependents) {
				mark_complete_and_common_ref(&negotiator, args, &ref);
				filter_refs(args, &ref, sought, nr_sought);
				if (everything_local(args, &ref))
					state = FETCH_SEND_REQUEST;

				mark_tips(&negotiator, args->negotiation_tips);
				for_each_cached_alternate(&negotiator,
							  insert_one_alternate_object);
				filter_refs(args, &ref, sought, nr_sought);
				state = FETCH_SEND_REQUEST;
		case FETCH_SEND_REQUEST:
			if (send_fetch_request(&negotiator, fd[1], args, ref,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
				state = FETCH_PROCESS_ACKS;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&negotiator, &reader, &common)) {
				state = FETCH_GET_PACK;
				state = FETCH_SEND_REQUEST;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);
			if (process_section_header(&reader, "wanted-refs", 1))
				receive_wanted_refs(&reader, sought, nr_sought);
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

	negotiator.release(&negotiator);
	oidset_clear(&common);
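/*
 * fetch.fsck.* configuration is collected into fsck_msg_types and eventually
 * passed to index-pack via its --strict option (see get_pack() above);
 * fetch.fsck.skiplist is turned into a "skiplist=<path>" entry.
 */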
static int fetch_pack_config_cb(const char *var, const char *value, void *cb)
	if (strcmp(var, "fetch.fsck.skiplist") == 0) {
		if (git_config_pathname(&path, var, value))
		strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
			    fsck_msg_types.len ? ',' : '=', path);

	if (skip_prefix(var, "fetch.fsck.", &var)) {
		if (is_valid_msg_type(var, value))
			strbuf_addf(&fsck_msg_types, "%c%s=%s",
				    fsck_msg_types.len ? ',' : '=', var, value);
			warning("Skipping unknown msg id '%s'", var);

	return git_default_config(var, value, cb);
static void fetch_pack_config(void)
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
	git_config_get_string("fetch.negotiationalgorithm",
			      &negotiation_algorithm);

	git_config(fetch_pack_config_cb, NULL);
static void fetch_pack_setup(void)
	static int did_setup;

	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
static int remove_duplicates_in_refs(struct ref **ref, int nr)
	struct string_list names = STRING_LIST_INIT_NODUP;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
			continue; /* already have it */
		item->util = ref[src];
			ref[dst] = ref[src];

	for (src = dst; src < nr; src++)
	string_list_clear(&names, 0);
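/*
 * Reconcile .git/shallow with what the fetch brought in: a fresh clone simply
 * accepts the shallow roots present in the pack, --update-shallow accepts
 * roots reachable from the new refs, and otherwise refs whose acceptance would
 * require updating .git/shallow are rejected as REF_STATUS_REJECT_SHALLOW.
 */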
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
	struct oid_array ref = OID_ARRAY_INIT;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
			commit_lock_file(&shallow_lock);

	if (!si->shallow || !si->shallow->nr)

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
	if (!si->nr_ours && !si->nr_theirs)
	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	oid_array_clear(&ref);
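/*
 * check_connected() callback: hand out the object id of each fetched ref,
 * one per call, returning -1 at the end of the list.
 */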
static int iterate_ref_map(void *cb_data, struct object_id *oid)
	struct ref **rm = cb_data;
	struct ref *ref = *rm;
		return -1; /* end of the list */
	oidcpy(oid, &ref->old_oid);
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
	struct ref *ref_cpy;
	struct shallow_info si;

	nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (args->no_dependents && !args->filter_options.choice) {
		/*
		 * The protocol does not support requesting that only the
		 * wanted objects be sent, so approximate this by setting a
		 * "blob:none" filter if no filter is already set. This works
		 * for all object types: note that wanted blobs will still be
		 * sent because they are directly specified as a "want".
		 *
		 * NEEDSWORK: Add an option in the protocol to request that
		 * only the wanted objects be sent, and implement it.
		 */
		parse_list_objects_filter(&args->filter_options, "blob:none");
	if (version != protocol_v2 && !ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	prepare_shallow_info(&si, shallow);
	if (version == protocol_v2)
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	reprepare_packed_git(the_repository);

	if (!args->cloning && args->deepen) {
		struct check_connected_options opt = CHECK_CONNECTED_INIT;
		struct ref *iterator = ref_cpy;
		opt.shallow_file = alternate_shallow_file;
		opt.is_deepening_fetch = 1;
		if (check_connected(iterate_ref_map, &iterator, &opt)) {
			error(_("remote did not send all necessary objects"));
			rollback_lock_file(&shallow_lock);
		args->connectivity_checked = 1;

	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
int report_unmatched_refs(struct ref **sought, int nr_sought)
	for (i = 0; i < nr_sought; i++) {
		switch (sought[i]->match_status) {
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),