fetch doc: cross-link two new negotiation options
[git.git] / fetch-pack.c
blob 50773fdde35a75181432f7f85ddf433dd0fb8c67
1 #include "cache.h"
2 #include "repository.h"
3 #include "config.h"
4 #include "lockfile.h"
5 #include "refs.h"
6 #include "pkt-line.h"
7 #include "commit.h"
8 #include "tag.h"
9 #include "exec-cmd.h"
10 #include "pack.h"
11 #include "sideband.h"
12 #include "fetch-pack.h"
13 #include "remote.h"
14 #include "run-command.h"
15 #include "connect.h"
16 #include "transport.h"
17 #include "version.h"
18 #include "sha1-array.h"
19 #include "oidset.h"
20 #include "packfile.h"
21 #include "fetch-negotiator.h"
23 static int transfer_unpack_limit = -1;
24 static int fetch_unpack_limit = -1;
25 static int unpack_limit = 100;
26 static int prefer_ofs_delta = 1;
27 static int no_done;
28 static int deepen_since_ok;
29 static int deepen_not_ok;
30 static int fetch_fsck_objects = -1;
31 static int transfer_fsck_objects = -1;
32 static int agent_supported;
33 static int server_supports_filtering;
34 static struct lock_file shallow_lock;
35 static const char *alternate_shallow_file;
36 static char *negotiation_algorithm;
38 /* Remember to update object flag allocation in object.h */
39 #define COMPLETE (1U << 0)
40 #define ALTERNATE (1U << 1)
42 /*
43 * After sending this many "have"s if we do not get any new ACK, we
44 * give up traversing our history.
45 */
46 #define MAX_IN_VAIN 256
48 static int multi_ack, use_sideband;
49 /* Allow specifying sha1 if it is a ref tip. */
50 #define ALLOW_TIP_SHA1 01
51 /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
52 #define ALLOW_REACHABLE_SHA1 02
53 static unsigned int allow_unadvertised_object_request;
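/*
 * The two ALLOW_* bits above are set in do_fetch_pack() when the server
 * advertises the "allow-tip-sha1-in-want" and "allow-reachable-sha1-in-want"
 * capabilities (protocol v2 enables ALLOW_REACHABLE_SHA1 unconditionally);
 * filter_refs() consults them before letting a request for an object id
 * that was not advertised go through.
 */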
55 __attribute__((format (printf, 2, 3)))
56 static inline void print_verbose(const struct fetch_pack_args *args,
57 const char *fmt, ...)
59 va_list params;
61 if (!args->verbose)
62 return;
64 va_start(params, fmt);
65 vfprintf(stderr, fmt, params);
66 va_end(params);
67 fputc('\n', stderr);
70 struct alternate_object_cache {
71 struct object **items;
72 size_t nr, alloc;
75 static void cache_one_alternate(const char *refname,
76 const struct object_id *oid,
77 void *vcache)
79 struct alternate_object_cache *cache = vcache;
80 struct object *obj = parse_object(oid);
82 if (!obj || (obj->flags & ALTERNATE))
83 return;
85 obj->flags |= ALTERNATE;
86 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
87 cache->items[cache->nr++] = obj;
90 static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
91 void (*cb)(struct fetch_negotiator *,
92 struct object *))
94 static int initialized;
95 static struct alternate_object_cache cache;
96 size_t i;
98 if (!initialized) {
99 for_each_alternate_ref(cache_one_alternate, &cache);
100 initialized = 1;
103 for (i = 0; i < cache.nr; i++)
104 cb(negotiator, cache.items[i]);
107 static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
108 const char *refname,
109 const struct object_id *oid)
111 struct object *o = deref_tag(parse_object(oid), refname, 0);
113 if (o && o->type == OBJ_COMMIT)
114 negotiator->add_tip(negotiator, (struct commit *)o);
116 return 0;
119 static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
120 int flag, void *cb_data)
122 return rev_list_insert_ref(cb_data, refname, oid);
125 enum ack_type {
126 NAK = 0,
127 ACK,
128 ACK_continue,
129 ACK_common,
130 ACK_ready
133 static void consume_shallow_list(struct fetch_pack_args *args, int fd)
135 if (args->stateless_rpc && args->deepen) {
136 /* If we sent a depth we will get back "duplicate"
137 * shallow and unshallow commands every time there
138 * is a block of have lines exchanged.
139 */
140 char *line;
141 while ((line = packet_read_line(fd, NULL))) {
142 if (starts_with(line, "shallow "))
143 continue;
144 if (starts_with(line, "unshallow "))
145 continue;
146 die(_("git fetch-pack: expected shallow list"));
151 static enum ack_type get_ack(int fd, struct object_id *result_oid)
153 int len;
154 char *line = packet_read_line(fd, &len);
155 const char *arg;
157 if (!line)
158 die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
159 if (!strcmp(line, "NAK"))
160 return NAK;
161 if (skip_prefix(line, "ACK ", &arg)) {
162 if (!get_oid_hex(arg, result_oid)) {
163 arg += 40;
164 len -= arg - line;
165 if (len < 1)
166 return ACK;
167 if (strstr(arg, "continue"))
168 return ACK_continue;
169 if (strstr(arg, "common"))
170 return ACK_common;
171 if (strstr(arg, "ready"))
172 return ACK_ready;
173 return ACK;
176 if (skip_prefix(line, "ERR ", &arg))
177 die(_("remote error: %s"), arg);
178 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
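/*
 * For reference, the acknowledgment payloads parsed above take these forms
 * (object id shortened for illustration):
 *
 *   NAK
 *   ACK 1234abcd...
 *   ACK 1234abcd... continue
 *   ACK 1234abcd... common
 *   ACK 1234abcd... ready
 *   ERR <message from the remote>
 */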
181 static void send_request(struct fetch_pack_args *args,
182 int fd, struct strbuf *buf)
184 if (args->stateless_rpc) {
185 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
186 packet_flush(fd);
187 } else
188 write_or_die(fd, buf->buf, buf->len);
191 static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
192 struct object *obj)
194 rev_list_insert_ref(negotiator, NULL, &obj->oid);
197 #define INITIAL_FLUSH 16
198 #define PIPESAFE_FLUSH 32
199 #define LARGE_FLUSH 16384
201 static int next_flush(int stateless_rpc, int count)
203 if (stateless_rpc) {
204 if (count < LARGE_FLUSH)
205 count <<= 1;
206 else
207 count = count * 11 / 10;
208 } else {
209 if (count < PIPESAFE_FLUSH)
210 count <<= 1;
211 else
212 count += PIPESAFE_FLUSH;
214 return count;
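/*
 * Illustrative window sizes produced by next_flush() starting from
 * INITIAL_FLUSH: over a bidirectional connection the count of "have"s per
 * round goes 16, 32, 64, 96, 128, ... (doubling up to PIPESAFE_FLUSH, then
 * growing linearly); over stateless RPC it doubles up to LARGE_FLUSH and
 * then grows by roughly 10% per round, since every round costs a fresh
 * request.
 */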
217 static void mark_tips(struct fetch_negotiator *negotiator,
218 const struct oid_array *negotiation_tips)
220 int i;
222 if (!negotiation_tips) {
223 for_each_ref(rev_list_insert_ref_oid, negotiator);
224 return;
227 for (i = 0; i < negotiation_tips->nr; i++)
228 rev_list_insert_ref(negotiator, NULL,
229 &negotiation_tips->oid[i]);
230 return;
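/*
 * mark_tips() seeds the negotiator with the commits negotiation starts
 * from: every local ref by default, or only the objects listed in
 * args->negotiation_tips (presumably filled in from the caller's
 * --negotiation-tip option) when the fetch was restricted that way.
 */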
233 static int find_common(struct fetch_negotiator *negotiator,
234 struct fetch_pack_args *args,
235 int fd[2], struct object_id *result_oid,
236 struct ref *refs)
238 int fetching;
239 int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
240 const struct object_id *oid;
241 unsigned in_vain = 0;
242 int got_continue = 0;
243 int got_ready = 0;
244 struct strbuf req_buf = STRBUF_INIT;
245 size_t state_len = 0;
247 if (args->stateless_rpc && multi_ack == 1)
248 die(_("--stateless-rpc requires multi_ack_detailed"));
250 mark_tips(negotiator, args->negotiation_tips);
251 for_each_cached_alternate(negotiator, insert_one_alternate_object);
253 fetching = 0;
254 for ( ; refs ; refs = refs->next) {
255 struct object_id *remote = &refs->old_oid;
256 const char *remote_hex;
257 struct object *o;
259 /*
260 * If that object is complete (i.e. it is an ancestor of a
261 * local ref), we tell them we have it but do not have to
262 * tell them about its ancestors, which they already know
263 * about.
264 *
265 * We use lookup_object here because we are only
266 * interested in the case we *know* the object is
267 * reachable and we have already scanned it.
268 */
269 if (((o = lookup_object(remote->hash)) != NULL) &&
270 (o->flags & COMPLETE)) {
271 continue;
274 remote_hex = oid_to_hex(remote);
275 if (!fetching) {
276 struct strbuf c = STRBUF_INIT;
277 if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
278 if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
279 if (no_done) strbuf_addstr(&c, " no-done");
280 if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
281 if (use_sideband == 1) strbuf_addstr(&c, " side-band");
282 if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
283 if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
284 if (args->no_progress) strbuf_addstr(&c, " no-progress");
285 if (args->include_tag) strbuf_addstr(&c, " include-tag");
286 if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
287 if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
288 if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
289 if (agent_supported) strbuf_addf(&c, " agent=%s",
290 git_user_agent_sanitized());
291 if (args->filter_options.choice)
292 strbuf_addstr(&c, " filter");
293 packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
294 strbuf_release(&c);
295 } else
296 packet_buf_write(&req_buf, "want %s\n", remote_hex);
297 fetching++;
300 if (!fetching) {
301 strbuf_release(&req_buf);
302 packet_flush(fd[1]);
303 return 1;
306 if (is_repository_shallow())
307 write_shallow_commits(&req_buf, 1, NULL);
308 if (args->depth > 0)
309 packet_buf_write(&req_buf, "deepen %d", args->depth);
310 if (args->deepen_since) {
311 timestamp_t max_age = approxidate(args->deepen_since);
312 packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
314 if (args->deepen_not) {
315 int i;
316 for (i = 0; i < args->deepen_not->nr; i++) {
317 struct string_list_item *s = args->deepen_not->items + i;
318 packet_buf_write(&req_buf, "deepen-not %s", s->string);
321 if (server_supports_filtering && args->filter_options.choice)
322 packet_buf_write(&req_buf, "filter %s",
323 args->filter_options.filter_spec);
324 packet_buf_flush(&req_buf);
325 state_len = req_buf.len;
327 if (args->deepen) {
328 char *line;
329 const char *arg;
330 struct object_id oid;
332 send_request(args, fd[1], &req_buf);
333 while ((line = packet_read_line(fd[0], NULL))) {
334 if (skip_prefix(line, "shallow ", &arg)) {
335 if (get_oid_hex(arg, &oid))
336 die(_("invalid shallow line: %s"), line);
337 register_shallow(&oid);
338 continue;
340 if (skip_prefix(line, "unshallow ", &arg)) {
341 if (get_oid_hex(arg, &oid))
342 die(_("invalid unshallow line: %s"), line);
343 if (!lookup_object(oid.hash))
344 die(_("object not found: %s"), line);
345 /* make sure that it is parsed as shallow */
346 if (!parse_object(&oid))
347 die(_("error in object: %s"), line);
348 if (unregister_shallow(&oid))
349 die(_("no shallow found: %s"), line);
350 continue;
352 die(_("expected shallow/unshallow, got %s"), line);
354 } else if (!args->stateless_rpc)
355 send_request(args, fd[1], &req_buf);
357 if (!args->stateless_rpc) {
358 /* If we aren't using the stateless-rpc interface
359 * we don't need to retain the headers.
360 */
361 strbuf_setlen(&req_buf, 0);
362 state_len = 0;
365 flushes = 0;
366 retval = -1;
367 if (args->no_dependents)
368 goto done;
369 while ((oid = negotiator->next(negotiator))) {
370 packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
371 print_verbose(args, "have %s", oid_to_hex(oid));
372 in_vain++;
373 if (flush_at <= ++count) {
374 int ack;
376 packet_buf_flush(&req_buf);
377 send_request(args, fd[1], &req_buf);
378 strbuf_setlen(&req_buf, state_len);
379 flushes++;
380 flush_at = next_flush(args->stateless_rpc, count);
382 /*
383 * We keep one window "ahead" of the other side, and
384 * will wait for an ACK only on the next one
385 */
386 if (!args->stateless_rpc && count == INITIAL_FLUSH)
387 continue;
389 consume_shallow_list(args, fd[0]);
390 do {
391 ack = get_ack(fd[0], result_oid);
392 if (ack)
393 print_verbose(args, _("got %s %d %s"), "ack",
394 ack, oid_to_hex(result_oid));
395 switch (ack) {
396 case ACK:
397 flushes = 0;
398 multi_ack = 0;
399 retval = 0;
400 goto done;
401 case ACK_common:
402 case ACK_ready:
403 case ACK_continue: {
404 struct commit *commit =
405 lookup_commit(result_oid);
406 int was_common;
407 if (!commit)
408 die(_("invalid commit %s"), oid_to_hex(result_oid));
409 was_common = negotiator->ack(negotiator, commit);
410 if (args->stateless_rpc
411 && ack == ACK_common
412 && !was_common) {
413 /* We need to replay the have for this object
414 * on the next RPC request so the peer knows
415 * it is in common with us.
416 */
417 const char *hex = oid_to_hex(result_oid);
418 packet_buf_write(&req_buf, "have %s\n", hex);
419 state_len = req_buf.len;
420 /*
421 * Reset in_vain because an ack
422 * for this commit has not been
423 * seen.
424 */
425 in_vain = 0;
426 } else if (!args->stateless_rpc
427 || ack != ACK_common)
428 in_vain = 0;
429 retval = 0;
430 got_continue = 1;
431 if (ack == ACK_ready)
432 got_ready = 1;
433 break;
436 } while (ack);
437 flushes--;
438 if (got_continue && MAX_IN_VAIN < in_vain) {
439 print_verbose(args, _("giving up"));
440 break; /* give up */
442 if (got_ready)
443 break;
446 done:
447 if (!got_ready || !no_done) {
448 packet_buf_write(&req_buf, "done\n");
449 send_request(args, fd[1], &req_buf);
451 print_verbose(args, _("done"));
452 if (retval != 0) {
453 multi_ack = 0;
454 flushes++;
456 strbuf_release(&req_buf);
458 if (!got_ready || !no_done)
459 consume_shallow_list(args, fd[0]);
460 while (flushes || multi_ack) {
461 int ack = get_ack(fd[0], result_oid);
462 if (ack) {
463 print_verbose(args, _("got %s (%d) %s"), "ack",
464 ack, oid_to_hex(result_oid));
465 if (ack == ACK)
466 return 0;
467 multi_ack = 1;
468 continue;
470 flushes--;
472 /* it is no error to fetch into a completely empty repo */
473 return count ? retval : 0;
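/*
 * In short, find_common() drives one protocol v0/v1 negotiation: it sends
 * the "want" lines (capabilities piggy-backed on the first one) plus any
 * shallow/deepen requests, then batches of "have" lines sized by
 * next_flush(), reading ACK/NAK responses until the server says "ready",
 * the negotiator runs out of commits, or too many (MAX_IN_VAIN) haves go
 * unacknowledged. It returns 0 when a common base was found (or when
 * fetching into an empty repository), non-zero otherwise.
 */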
476 static struct commit_list *complete;
478 static int mark_complete(const struct object_id *oid)
480 struct object *o = parse_object(oid);
482 while (o && o->type == OBJ_TAG) {
483 struct tag *t = (struct tag *) o;
484 if (!t->tagged)
485 break; /* broken repository */
486 o->flags |= COMPLETE;
487 o = parse_object(&t->tagged->oid);
489 if (o && o->type == OBJ_COMMIT) {
490 struct commit *commit = (struct commit *)o;
491 if (!(commit->object.flags & COMPLETE)) {
492 commit->object.flags |= COMPLETE;
493 commit_list_insert(commit, &complete);
496 return 0;
499 static int mark_complete_oid(const char *refname, const struct object_id *oid,
500 int flag, void *cb_data)
502 return mark_complete(oid);
505 static void mark_recent_complete_commits(struct fetch_pack_args *args,
506 timestamp_t cutoff)
508 while (complete && cutoff <= complete->item->date) {
509 print_verbose(args, _("Marking %s as complete"),
510 oid_to_hex(&complete->item->object.oid));
511 pop_most_recent_commit(&complete, COMPLETE);
515 static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
517 for (; refs; refs = refs->next)
518 oidset_insert(oids, &refs->old_oid);
521 static int tip_oids_contain(struct oidset *tip_oids,
522 struct ref *unmatched, struct ref *newlist,
523 const struct object_id *id)
525 /*
526 * Note that this only looks at the ref lists the first time it's
527 * called. This works out in filter_refs() because even though it may
528 * add to "newlist" between calls, the additions will always be for
529 * oids that are already in the set.
530 */
531 if (!tip_oids->map.map.tablesize) {
532 add_refs_to_oidset(tip_oids, unmatched);
533 add_refs_to_oidset(tip_oids, newlist);
535 return oidset_contains(tip_oids, id);
538 static void filter_refs(struct fetch_pack_args *args,
539 struct ref **refs,
540 struct ref **sought, int nr_sought)
542 struct ref *newlist = NULL;
543 struct ref **newtail = &newlist;
544 struct ref *unmatched = NULL;
545 struct ref *ref, *next;
546 struct oidset tip_oids = OIDSET_INIT;
547 int i;
549 i = 0;
550 for (ref = *refs; ref; ref = next) {
551 int keep = 0;
552 next = ref->next;
554 if (starts_with(ref->name, "refs/") &&
555 check_refname_format(ref->name, 0))
556 ; /* trash */
557 else {
558 while (i < nr_sought) {
559 int cmp = strcmp(ref->name, sought[i]->name);
560 if (cmp < 0)
561 break; /* definitely do not have it */
562 else if (cmp == 0) {
563 keep = 1; /* definitely have it */
564 sought[i]->match_status = REF_MATCHED;
566 i++;
570 if (!keep && args->fetch_all &&
571 (!args->deepen || !starts_with(ref->name, "refs/tags/")))
572 keep = 1;
574 if (keep) {
575 *newtail = ref;
576 ref->next = NULL;
577 newtail = &ref->next;
578 } else {
579 ref->next = unmatched;
580 unmatched = ref;
584 /* Append unmatched requests to the list */
585 for (i = 0; i < nr_sought; i++) {
586 struct object_id oid;
587 const char *p;
589 ref = sought[i];
590 if (ref->match_status != REF_NOT_MATCHED)
591 continue;
592 if (parse_oid_hex(ref->name, &oid, &p) ||
593 *p != '\0' ||
594 oidcmp(&oid, &ref->old_oid))
595 continue;
597 if ((allow_unadvertised_object_request &
598 (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
599 tip_oids_contain(&tip_oids, unmatched, newlist,
600 &ref->old_oid)) {
601 ref->match_status = REF_MATCHED;
602 *newtail = copy_ref(ref);
603 newtail = &(*newtail)->next;
604 } else {
605 ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
609 oidset_clear(&tip_oids);
610 for (ref = unmatched; ref; ref = next) {
611 next = ref->next;
612 free(ref);
615 *refs = newlist;
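/*
 * filter_refs() relies on both the advertised ref list and sought[] being
 * sorted by name (do_fetch_pack() and do_fetch_pack_v2() arrange this with
 * sort_ref_list() and QSORT() before getting here), which is what lets the
 * loop above match them in a single linear pass with the index `i`.
 */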
618 static void mark_alternate_complete(struct fetch_negotiator *unused,
619 struct object *obj)
621 mark_complete(&obj->oid);
624 struct loose_object_iter {
625 struct oidset *loose_object_set;
626 struct ref *refs;
629 /*
630 * If the number of refs is not larger than the number of loose objects,
631 * this function stops inserting.
632 */
633 static int add_loose_objects_to_set(const struct object_id *oid,
634 const char *path,
635 void *data)
637 struct loose_object_iter *iter = data;
638 oidset_insert(iter->loose_object_set, oid);
639 if (iter->refs == NULL)
640 return 1;
642 iter->refs = iter->refs->next;
643 return 0;
646 /*
647 * Mark recent commits available locally and reachable from a local ref as
648 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
649 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
650 * thus do not need COMMON_REF marks).
651 *
652 * The cutoff time for recency is determined by this heuristic: it is the
653 * earliest commit time of the objects in refs that are commits and that we know
654 * the commit time of.
655 */
656 static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
657 struct fetch_pack_args *args,
658 struct ref **refs)
660 struct ref *ref;
661 int old_save_commit_buffer = save_commit_buffer;
662 timestamp_t cutoff = 0;
663 struct oidset loose_oid_set = OIDSET_INIT;
664 int use_oidset = 0;
665 struct loose_object_iter iter = {&loose_oid_set, *refs};
667 /* Either enumerate all loose objects into the set, or learn that the refs are no more numerous than the loose objects and per-ref checks are cheap enough. */
668 use_oidset = !for_each_loose_object(add_loose_objects_to_set,
669 &iter, 0);
671 save_commit_buffer = 0;
673 for (ref = *refs; ref; ref = ref->next) {
674 struct object *o;
675 unsigned int flags = OBJECT_INFO_QUICK;
677 if (use_oidset &&
678 !oidset_contains(&loose_oid_set, &ref->old_oid)) {
679 /*
680 * I know this does not exist in the loose form,
681 * so check if it exists in a non-loose form.
682 */
683 flags |= OBJECT_INFO_IGNORE_LOOSE;
686 if (!has_object_file_with_flags(&ref->old_oid, flags))
687 continue;
688 o = parse_object(&ref->old_oid);
689 if (!o)
690 continue;
692 /* We already have it -- which may mean that we were
693 * in sync with the other side at some time after
694 * that (it is OK if we guess wrong here).
695 */
696 if (o->type == OBJ_COMMIT) {
697 struct commit *commit = (struct commit *)o;
698 if (!cutoff || cutoff < commit->date)
699 cutoff = commit->date;
703 oidset_clear(&loose_oid_set);
705 if (!args->no_dependents) {
706 if (!args->deepen) {
707 for_each_ref(mark_complete_oid, NULL);
708 for_each_cached_alternate(NULL, mark_alternate_complete);
709 commit_list_sort_by_date(&complete);
710 if (cutoff)
711 mark_recent_complete_commits(args, cutoff);
714 /*
715 * Mark all complete remote refs as common refs.
716 * Don't mark them common yet; the server has to be told so first.
717 */
718 for (ref = *refs; ref; ref = ref->next) {
719 struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
720 NULL, 0);
722 if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
723 continue;
725 negotiator->known_common(negotiator,
726 (struct commit *)o);
730 save_commit_buffer = old_save_commit_buffer;
733 /*
734 * Returns 1 if every object pointed to by the given remote refs is available
735 * locally and reachable from a local ref, and 0 otherwise.
736 */
737 static int everything_local(struct fetch_pack_args *args,
738 struct ref **refs)
740 struct ref *ref;
741 int retval;
743 for (retval = 1, ref = *refs; ref ; ref = ref->next) {
744 const struct object_id *remote = &ref->old_oid;
745 struct object *o;
747 o = lookup_object(remote->hash);
748 if (!o || !(o->flags & COMPLETE)) {
749 retval = 0;
750 print_verbose(args, "want %s (%s)", oid_to_hex(remote),
751 ref->name);
752 continue;
754 print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
755 ref->name);
758 return retval;
761 static int sideband_demux(int in, int out, void *data)
763 int *xd = data;
764 int ret;
766 ret = recv_sideband("fetch-pack", xd[0], out);
767 close(out);
768 return ret;
771 static int get_pack(struct fetch_pack_args *args,
772 int xd[2], char **pack_lockfile)
774 struct async demux;
775 int do_keep = args->keep_pack;
776 const char *cmd_name;
777 struct pack_header header;
778 int pass_header = 0;
779 struct child_process cmd = CHILD_PROCESS_INIT;
780 int ret;
782 memset(&demux, 0, sizeof(demux));
783 if (use_sideband) {
784 /* xd[] is talking with upload-pack; subprocess reads from
785 * xd[0], spits out band#2 to stderr, and feeds us band#1
786 * through demux->out.
787 */
788 demux.proc = sideband_demux;
789 demux.data = xd;
790 demux.out = -1;
791 demux.isolate_sigpipe = 1;
792 if (start_async(&demux))
793 die(_("fetch-pack: unable to fork off sideband demultiplexer"));
795 else
796 demux.out = xd[0];
798 if (!args->keep_pack && unpack_limit) {
800 if (read_pack_header(demux.out, &header))
801 die(_("protocol error: bad pack header"));
802 pass_header = 1;
803 if (ntohl(header.hdr_entries) < unpack_limit)
804 do_keep = 0;
805 else
806 do_keep = 1;
809 if (alternate_shallow_file) {
810 argv_array_push(&cmd.args, "--shallow-file");
811 argv_array_push(&cmd.args, alternate_shallow_file);
814 if (do_keep || args->from_promisor) {
815 if (pack_lockfile)
816 cmd.out = -1;
817 cmd_name = "index-pack";
818 argv_array_push(&cmd.args, cmd_name);
819 argv_array_push(&cmd.args, "--stdin");
820 if (!args->quiet && !args->no_progress)
821 argv_array_push(&cmd.args, "-v");
822 if (args->use_thin_pack)
823 argv_array_push(&cmd.args, "--fix-thin");
824 if (do_keep && (args->lock_pack || unpack_limit)) {
825 char hostname[HOST_NAME_MAX + 1];
826 if (xgethostname(hostname, sizeof(hostname)))
827 xsnprintf(hostname, sizeof(hostname), "localhost");
828 argv_array_pushf(&cmd.args,
829 "--keep=fetch-pack %"PRIuMAX " on %s",
830 (uintmax_t)getpid(), hostname);
832 if (args->check_self_contained_and_connected)
833 argv_array_push(&cmd.args, "--check-self-contained-and-connected");
834 if (args->from_promisor)
835 argv_array_push(&cmd.args, "--promisor");
837 else {
838 cmd_name = "unpack-objects";
839 argv_array_push(&cmd.args, cmd_name);
840 if (args->quiet || args->no_progress)
841 argv_array_push(&cmd.args, "-q");
842 args->check_self_contained_and_connected = 0;
845 if (pass_header)
846 argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
847 ntohl(header.hdr_version),
848 ntohl(header.hdr_entries));
849 if (fetch_fsck_objects >= 0
850 ? fetch_fsck_objects
851 : transfer_fsck_objects >= 0
852 ? transfer_fsck_objects
853 : 0) {
854 if (args->from_promisor)
855 /*
856 * We cannot use --strict in index-pack because it
857 * checks both broken objects and links, but we only
858 * want to check for broken objects.
859 */
860 argv_array_push(&cmd.args, "--fsck-objects");
861 else
862 argv_array_push(&cmd.args, "--strict");
865 cmd.in = demux.out;
866 cmd.git_cmd = 1;
867 if (start_command(&cmd))
868 die(_("fetch-pack: unable to fork off %s"), cmd_name);
869 if (do_keep && pack_lockfile) {
870 *pack_lockfile = index_pack_lockfile(cmd.out);
871 close(cmd.out);
874 if (!use_sideband)
875 /* Closed by start_command() */
876 xd[0] = -1;
878 ret = finish_command(&cmd);
879 if (!ret || (args->check_self_contained_and_connected && ret == 1))
880 args->self_contained_and_connected =
881 args->check_self_contained_and_connected &&
882 ret == 0;
883 else
884 die(_("%s failed"), cmd_name);
885 if (use_sideband && finish_async(&demux))
886 die(_("error in sideband demultiplexer"));
887 return 0;
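/*
 * get_pack() picks the consumer for the incoming data: "index-pack --stdin"
 * (keeping the pack, optionally with a --keep lockfile) when keep_pack was
 * requested, when the pack header reports at least unpack_limit objects, or
 * when fetching from a promisor remote; otherwise the small pack is handed
 * to "unpack-objects" and exploded into loose objects.
 */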
890 static int cmp_ref_by_name(const void *a_, const void *b_)
892 const struct ref *a = *((const struct ref **)a_);
893 const struct ref *b = *((const struct ref **)b_);
894 return strcmp(a->name, b->name);
897 static struct ref *do_fetch_pack(struct fetch_pack_args *args,
898 int fd[2],
899 const struct ref *orig_ref,
900 struct ref **sought, int nr_sought,
901 struct shallow_info *si,
902 char **pack_lockfile)
904 struct ref *ref = copy_ref_list(orig_ref);
905 struct object_id oid;
906 const char *agent_feature;
907 int agent_len;
908 struct fetch_negotiator negotiator;
909 fetch_negotiator_init(&negotiator, negotiation_algorithm);
911 sort_ref_list(&ref, ref_compare_name);
912 QSORT(sought, nr_sought, cmp_ref_by_name);
914 if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
915 die(_("Server does not support shallow clients"));
916 if (args->depth > 0 || args->deepen_since || args->deepen_not)
917 args->deepen = 1;
918 if (server_supports("multi_ack_detailed")) {
919 print_verbose(args, _("Server supports multi_ack_detailed"));
920 multi_ack = 2;
921 if (server_supports("no-done")) {
922 print_verbose(args, _("Server supports no-done"));
923 if (args->stateless_rpc)
924 no_done = 1;
927 else if (server_supports("multi_ack")) {
928 print_verbose(args, _("Server supports multi_ack"));
929 multi_ack = 1;
931 if (server_supports("side-band-64k")) {
932 print_verbose(args, _("Server supports side-band-64k"));
933 use_sideband = 2;
935 else if (server_supports("side-band")) {
936 print_verbose(args, _("Server supports side-band"));
937 use_sideband = 1;
939 if (server_supports("allow-tip-sha1-in-want")) {
940 print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
941 allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
943 if (server_supports("allow-reachable-sha1-in-want")) {
944 print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
945 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
947 if (!server_supports("thin-pack"))
948 args->use_thin_pack = 0;
949 if (!server_supports("no-progress"))
950 args->no_progress = 0;
951 if (!server_supports("include-tag"))
952 args->include_tag = 0;
953 if (server_supports("ofs-delta"))
954 print_verbose(args, _("Server supports ofs-delta"));
955 else
956 prefer_ofs_delta = 0;
958 if (server_supports("filter")) {
959 server_supports_filtering = 1;
960 print_verbose(args, _("Server supports filter"));
961 } else if (args->filter_options.choice) {
962 warning("filtering not recognized by server, ignoring");
965 if ((agent_feature = server_feature_value("agent", &agent_len))) {
966 agent_supported = 1;
967 if (agent_len)
968 print_verbose(args, _("Server version is %.*s"),
969 agent_len, agent_feature);
971 if (server_supports("deepen-since"))
972 deepen_since_ok = 1;
973 else if (args->deepen_since)
974 die(_("Server does not support --shallow-since"));
975 if (server_supports("deepen-not"))
976 deepen_not_ok = 1;
977 else if (args->deepen_not)
978 die(_("Server does not support --shallow-exclude"));
979 if (!server_supports("deepen-relative") && args->deepen_relative)
980 die(_("Server does not support --deepen"));
982 mark_complete_and_common_ref(&negotiator, args, &ref);
983 filter_refs(args, &ref, sought, nr_sought);
984 if (everything_local(args, &ref)) {
985 packet_flush(fd[1]);
986 goto all_done;
988 if (find_common(&negotiator, args, fd, &oid, ref) < 0)
989 if (!args->keep_pack)
990 /* When cloning, it is not unusual to have
991 * no common commit.
992 */
993 warning(_("no common commits"));
995 if (args->stateless_rpc)
996 packet_flush(fd[1]);
997 if (args->deepen)
998 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
999 NULL);
1000 else if (si->nr_ours || si->nr_theirs)
1001 alternate_shallow_file = setup_temporary_shallow(si->shallow);
1002 else
1003 alternate_shallow_file = NULL;
1004 if (get_pack(args, fd, pack_lockfile))
1005 die(_("git fetch-pack: fetch failed."));
1007 all_done:
1008 negotiator.release(&negotiator);
1009 return ref;
1012 static void add_shallow_requests(struct strbuf *req_buf,
1013 const struct fetch_pack_args *args)
1015 if (is_repository_shallow())
1016 write_shallow_commits(req_buf, 1, NULL);
1017 if (args->depth > 0)
1018 packet_buf_write(req_buf, "deepen %d", args->depth);
1019 if (args->deepen_since) {
1020 timestamp_t max_age = approxidate(args->deepen_since);
1021 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1023 if (args->deepen_not) {
1024 int i;
1025 for (i = 0; i < args->deepen_not->nr; i++) {
1026 struct string_list_item *s = args->deepen_not->items + i;
1027 packet_buf_write(req_buf, "deepen-not %s", s->string);
1032 static void add_wants(const struct ref *wants, struct strbuf *req_buf)
1034 for ( ; wants ; wants = wants->next) {
1035 const struct object_id *remote = &wants->old_oid;
1036 const char *remote_hex;
1037 struct object *o;
1039 /*
1040 * If that object is complete (i.e. it is an ancestor of a
1041 * local ref), we tell them we have it but do not have to
1042 * tell them about its ancestors, which they already know
1043 * about.
1044 *
1045 * We use lookup_object here because we are only
1046 * interested in the case we *know* the object is
1047 * reachable and we have already scanned it.
1048 */
1049 if (((o = lookup_object(remote->hash)) != NULL) &&
1050 (o->flags & COMPLETE)) {
1051 continue;
1054 remote_hex = oid_to_hex(remote);
1055 packet_buf_write(req_buf, "want %s\n", remote_hex);
1059 static void add_common(struct strbuf *req_buf, struct oidset *common)
1061 struct oidset_iter iter;
1062 const struct object_id *oid;
1063 oidset_iter_init(common, &iter);
1065 while ((oid = oidset_iter_next(&iter))) {
1066 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1070 static int add_haves(struct fetch_negotiator *negotiator,
1071 struct strbuf *req_buf,
1072 int *haves_to_send, int *in_vain)
1074 int ret = 0;
1075 int haves_added = 0;
1076 const struct object_id *oid;
1078 while ((oid = negotiator->next(negotiator))) {
1079 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1080 if (++haves_added >= *haves_to_send)
1081 break;
1084 *in_vain += haves_added;
1085 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1086 /* Send Done */
1087 packet_buf_write(req_buf, "done\n");
1088 ret = 1;
1091 /* Increase haves to send on next round */
1092 *haves_to_send = next_flush(1, *haves_to_send);
1094 return ret;
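/*
 * add_haves() writes up to *haves_to_send "have" lines for one request and
 * enlarges that budget for the next round via next_flush(); once the
 * negotiator has nothing left to offer, or *in_vain reaches MAX_IN_VAIN
 * without the server acknowledging anything, it appends "done" and returns
 * 1 to signal the final round.
 */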
1097 static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
1098 const struct fetch_pack_args *args,
1099 const struct ref *wants, struct oidset *common,
1100 int *haves_to_send, int *in_vain)
1102 int ret = 0;
1103 struct strbuf req_buf = STRBUF_INIT;
1105 if (server_supports_v2("fetch", 1))
1106 packet_buf_write(&req_buf, "command=fetch");
1107 if (server_supports_v2("agent", 0))
1108 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1109 if (args->server_options && args->server_options->nr &&
1110 server_supports_v2("server-option", 1)) {
1111 int i;
1112 for (i = 0; i < args->server_options->nr; i++)
1113 packet_write_fmt(fd_out, "server-option=%s",
1114 args->server_options->items[i].string);
1117 packet_buf_delim(&req_buf);
1118 if (args->use_thin_pack)
1119 packet_buf_write(&req_buf, "thin-pack");
1120 if (args->no_progress)
1121 packet_buf_write(&req_buf, "no-progress");
1122 if (args->include_tag)
1123 packet_buf_write(&req_buf, "include-tag");
1124 if (prefer_ofs_delta)
1125 packet_buf_write(&req_buf, "ofs-delta");
1127 /* Add shallow-info and deepen request */
1128 if (server_supports_feature("fetch", "shallow", 0))
1129 add_shallow_requests(&req_buf, args);
1130 else if (is_repository_shallow() || args->deepen)
1131 die(_("Server does not support shallow requests"));
1133 /* Add filter */
1134 if (server_supports_feature("fetch", "filter", 0) &&
1135 args->filter_options.choice) {
1136 print_verbose(args, _("Server supports filter"));
1137 packet_buf_write(&req_buf, "filter %s",
1138 args->filter_options.filter_spec);
1139 } else if (args->filter_options.choice) {
1140 warning("filtering not recognized by server, ignoring");
1143 /* add wants */
1144 add_wants(wants, &req_buf);
1146 if (args->no_dependents) {
1147 packet_buf_write(&req_buf, "done");
1148 ret = 1;
1149 } else {
1150 /* Add all of the common commits we've found in previous rounds */
1151 add_common(&req_buf, common);
1153 /* Add initial haves */
1154 ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
1157 /* Send request */
1158 packet_buf_flush(&req_buf);
1159 write_or_die(fd_out, req_buf.buf, req_buf.len);
1161 strbuf_release(&req_buf);
1162 return ret;
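/*
 * For illustration, one round of the protocol v2 request assembled above
 * might carry pkt-line payloads along these lines (object ids shortened,
 * no shallow or filter arguments in play):
 *
 *   command=fetch
 *   agent=git/2.x
 *   <delim>
 *   thin-pack
 *   ofs-delta
 *   want 1234abcd...
 *   have 5678ef01...
 *   <flush>
 *
 * with "done" added before the flush only when args->no_dependents is set
 * or add_haves() decided this is the last round.
 */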
1165 /*
1166 * Processes a section header in a server's response and checks if it matches
1167 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1168 * not consumed); if 0, the line will be consumed and the function will die if
1169 * the section header doesn't match what was expected.
1170 */
1171 static int process_section_header(struct packet_reader *reader,
1172 const char *section, int peek)
1174 int ret;
1176 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1177 die("error reading section header '%s'", section);
1179 ret = !strcmp(reader->line, section);
1181 if (!peek) {
1182 if (!ret)
1183 die("expected '%s', received '%s'",
1184 section, reader->line);
1185 packet_reader_read(reader);
1188 return ret;
1191 static int process_acks(struct fetch_negotiator *negotiator,
1192 struct packet_reader *reader,
1193 struct oidset *common)
1195 /* received */
1196 int received_ready = 0;
1197 int received_ack = 0;
1199 process_section_header(reader, "acknowledgments", 0);
1200 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1201 const char *arg;
1203 if (!strcmp(reader->line, "NAK"))
1204 continue;
1206 if (skip_prefix(reader->line, "ACK ", &arg)) {
1207 struct object_id oid;
1208 if (!get_oid_hex(arg, &oid)) {
1209 struct commit *commit;
1210 oidset_insert(common, &oid);
1211 commit = lookup_commit(&oid);
1212 negotiator->ack(negotiator, commit);
1214 continue;
1217 if (!strcmp(reader->line, "ready")) {
1218 received_ready = 1;
1219 continue;
1222 die("unexpected acknowledgment line: '%s'", reader->line);
1225 if (reader->status != PACKET_READ_FLUSH &&
1226 reader->status != PACKET_READ_DELIM)
1227 die("error processing acks: %d", reader->status);
1229 /* return 0 if no common, 1 if there are common, or 2 if ready */
1230 return received_ready ? 2 : (received_ack ? 1 : 0);
1233 static void receive_shallow_info(struct fetch_pack_args *args,
1234 struct packet_reader *reader)
1236 process_section_header(reader, "shallow-info", 0);
1237 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1238 const char *arg;
1239 struct object_id oid;
1241 if (skip_prefix(reader->line, "shallow ", &arg)) {
1242 if (get_oid_hex(arg, &oid))
1243 die(_("invalid shallow line: %s"), reader->line);
1244 register_shallow(&oid);
1245 continue;
1247 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1248 if (get_oid_hex(arg, &oid))
1249 die(_("invalid unshallow line: %s"), reader->line);
1250 if (!lookup_object(oid.hash))
1251 die(_("object not found: %s"), reader->line);
1252 /* make sure that it is parsed as shallow */
1253 if (!parse_object(&oid))
1254 die(_("error in object: %s"), reader->line);
1255 if (unregister_shallow(&oid))
1256 die(_("no shallow found: %s"), reader->line);
1257 continue;
1259 die(_("expected shallow/unshallow, got %s"), reader->line);
1262 if (reader->status != PACKET_READ_FLUSH &&
1263 reader->status != PACKET_READ_DELIM)
1264 die("error processing shallow info: %d", reader->status);
1266 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
1267 args->deepen = 1;
1270 enum fetch_state {
1271 FETCH_CHECK_LOCAL = 0,
1272 FETCH_SEND_REQUEST,
1273 FETCH_PROCESS_ACKS,
1274 FETCH_GET_PACK,
1275 FETCH_DONE,
1278 static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
1279 int fd[2],
1280 const struct ref *orig_ref,
1281 struct ref **sought, int nr_sought,
1282 char **pack_lockfile)
1284 struct ref *ref = copy_ref_list(orig_ref);
1285 enum fetch_state state = FETCH_CHECK_LOCAL;
1286 struct oidset common = OIDSET_INIT;
1287 struct packet_reader reader;
1288 int in_vain = 0;
1289 int haves_to_send = INITIAL_FLUSH;
1290 struct fetch_negotiator negotiator;
1291 fetch_negotiator_init(&negotiator, negotiation_algorithm);
1292 packet_reader_init(&reader, fd[0], NULL, 0,
1293 PACKET_READ_CHOMP_NEWLINE);
1295 while (state != FETCH_DONE) {
1296 switch (state) {
1297 case FETCH_CHECK_LOCAL:
1298 sort_ref_list(&ref, ref_compare_name);
1299 QSORT(sought, nr_sought, cmp_ref_by_name);
1301 /* v2 supports these by default */
1302 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
1303 use_sideband = 2;
1304 if (args->depth > 0 || args->deepen_since || args->deepen_not)
1305 args->deepen = 1;
1307 /* Filter 'ref' by 'sought' and those that aren't local */
1308 mark_complete_and_common_ref(&negotiator, args, &ref);
1309 filter_refs(args, &ref, sought, nr_sought);
1310 if (everything_local(args, &ref))
1311 state = FETCH_DONE;
1312 else
1313 state = FETCH_SEND_REQUEST;
1315 mark_tips(&negotiator, args->negotiation_tips);
1316 for_each_cached_alternate(&negotiator,
1317 insert_one_alternate_object);
1318 break;
1319 case FETCH_SEND_REQUEST:
1320 if (send_fetch_request(&negotiator, fd[1], args, ref,
1321 &common,
1322 &haves_to_send, &in_vain))
1323 state = FETCH_GET_PACK;
1324 else
1325 state = FETCH_PROCESS_ACKS;
1326 break;
1327 case FETCH_PROCESS_ACKS:
1328 /* Process ACKs/NAKs */
1329 switch (process_acks(&negotiator, &reader, &common)) {
1330 case 2:
1331 state = FETCH_GET_PACK;
1332 break;
1333 case 1:
1334 in_vain = 0;
1335 /* fallthrough */
1336 default:
1337 state = FETCH_SEND_REQUEST;
1338 break;
1340 break;
1341 case FETCH_GET_PACK:
1342 /* Check for shallow-info section */
1343 if (process_section_header(&reader, "shallow-info", 1))
1344 receive_shallow_info(args, &reader);
1346 /* get the pack */
1347 process_section_header(&reader, "packfile", 0);
1348 if (get_pack(args, fd, pack_lockfile))
1349 die(_("git fetch-pack: fetch failed."));
1351 state = FETCH_DONE;
1352 break;
1353 case FETCH_DONE:
1354 continue;
1358 negotiator.release(&negotiator);
1359 oidset_clear(&common);
1360 return ref;
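/*
 * Sketch of the state machine driven above:
 *
 *   FETCH_CHECK_LOCAL  -> FETCH_DONE          everything wanted is local
 *                      -> FETCH_SEND_REQUEST  otherwise
 *   FETCH_SEND_REQUEST -> FETCH_GET_PACK      the request ended with "done"
 *                      -> FETCH_PROCESS_ACKS  otherwise
 *   FETCH_PROCESS_ACKS -> FETCH_GET_PACK      server answered "ready"
 *                      -> FETCH_SEND_REQUEST  otherwise (another round)
 *   FETCH_GET_PACK     -> FETCH_DONE          after the packfile section
 */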
1363 static void fetch_pack_config(void)
1365 git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
1366 git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
1367 git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
1368 git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
1369 git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
1370 git_config_get_string("fetch.negotiationalgorithm",
1371 &negotiation_algorithm);
1373 git_config(git_default_config, NULL);
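/*
 * fetch.negotiationAlgorithm supplies the negotiation_algorithm string that
 * is later handed to fetch_negotiator_init(); besides the default
 * algorithm, a "skipping" algorithm (which sends only a thinned-out subset
 * of "have"s to cut down on round trips) is expected to be selectable,
 * e.g.:
 *
 *   git config fetch.negotiationAlgorithm skipping
 */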
1376 static void fetch_pack_setup(void)
1378 static int did_setup;
1379 if (did_setup)
1380 return;
1381 fetch_pack_config();
1382 if (0 <= transfer_unpack_limit)
1383 unpack_limit = transfer_unpack_limit;
1384 else if (0 <= fetch_unpack_limit)
1385 unpack_limit = fetch_unpack_limit;
1386 did_setup = 1;
1389 static int remove_duplicates_in_refs(struct ref **ref, int nr)
1391 struct string_list names = STRING_LIST_INIT_NODUP;
1392 int src, dst;
1394 for (src = dst = 0; src < nr; src++) {
1395 struct string_list_item *item;
1396 item = string_list_insert(&names, ref[src]->name);
1397 if (item->util)
1398 continue; /* already have it */
1399 item->util = ref[src];
1400 if (src != dst)
1401 ref[dst] = ref[src];
1402 dst++;
1404 for (src = dst; src < nr; src++)
1405 ref[src] = NULL;
1406 string_list_clear(&names, 0);
1407 return dst;
1410 static void update_shallow(struct fetch_pack_args *args,
1411 struct ref **sought, int nr_sought,
1412 struct shallow_info *si)
1414 struct oid_array ref = OID_ARRAY_INIT;
1415 int *status;
1416 int i;
1418 if (args->deepen && alternate_shallow_file) {
1419 if (*alternate_shallow_file == '\0') { /* --unshallow */
1420 unlink_or_warn(git_path_shallow());
1421 rollback_lock_file(&shallow_lock);
1422 } else
1423 commit_lock_file(&shallow_lock);
1424 return;
1427 if (!si->shallow || !si->shallow->nr)
1428 return;
1430 if (args->cloning) {
1431 /*
1432 * remote is shallow, but this is a clone, there are
1433 * no objects in repo to worry about. Accept any
1434 * shallow points that exist in the pack (iow in repo
1435 * after get_pack() and reprepare_packed_git())
1436 */
1437 struct oid_array extra = OID_ARRAY_INIT;
1438 struct object_id *oid = si->shallow->oid;
1439 for (i = 0; i < si->shallow->nr; i++)
1440 if (has_object_file(&oid[i]))
1441 oid_array_append(&extra, &oid[i]);
1442 if (extra.nr) {
1443 setup_alternate_shallow(&shallow_lock,
1444 &alternate_shallow_file,
1445 &extra);
1446 commit_lock_file(&shallow_lock);
1448 oid_array_clear(&extra);
1449 return;
1452 if (!si->nr_ours && !si->nr_theirs)
1453 return;
1455 remove_nonexistent_theirs_shallow(si);
1456 if (!si->nr_ours && !si->nr_theirs)
1457 return;
1458 for (i = 0; i < nr_sought; i++)
1459 oid_array_append(&ref, &sought[i]->old_oid);
1460 si->ref = &ref;
1462 if (args->update_shallow) {
1463 /*
1464 * remote is also shallow, .git/shallow may be updated
1465 * so all refs can be accepted. Make sure we only add
1466 * shallow roots that are actually reachable from new
1467 * refs.
1468 */
1469 struct oid_array extra = OID_ARRAY_INIT;
1470 struct object_id *oid = si->shallow->oid;
1471 assign_shallow_commits_to_refs(si, NULL, NULL);
1472 if (!si->nr_ours && !si->nr_theirs) {
1473 oid_array_clear(&ref);
1474 return;
1476 for (i = 0; i < si->nr_ours; i++)
1477 oid_array_append(&extra, &oid[si->ours[i]]);
1478 for (i = 0; i < si->nr_theirs; i++)
1479 oid_array_append(&extra, &oid[si->theirs[i]]);
1480 setup_alternate_shallow(&shallow_lock,
1481 &alternate_shallow_file,
1482 &extra);
1483 commit_lock_file(&shallow_lock);
1484 oid_array_clear(&extra);
1485 oid_array_clear(&ref);
1486 return;
1489 /*
1490 * remote is also shallow, check what ref is safe to update
1491 * without updating .git/shallow
1492 */
1493 status = xcalloc(nr_sought, sizeof(*status));
1494 assign_shallow_commits_to_refs(si, NULL, status);
1495 if (si->nr_ours || si->nr_theirs) {
1496 for (i = 0; i < nr_sought; i++)
1497 if (status[i])
1498 sought[i]->status = REF_STATUS_REJECT_SHALLOW;
1500 free(status);
1501 oid_array_clear(&ref);
1504 struct ref *fetch_pack(struct fetch_pack_args *args,
1505 int fd[], struct child_process *conn,
1506 const struct ref *ref,
1507 const char *dest,
1508 struct ref **sought, int nr_sought,
1509 struct oid_array *shallow,
1510 char **pack_lockfile,
1511 enum protocol_version version)
1513 struct ref *ref_cpy;
1514 struct shallow_info si;
1516 fetch_pack_setup();
1517 if (nr_sought)
1518 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1520 if (!ref) {
1521 packet_flush(fd[1]);
1522 die(_("no matching remote head"));
1524 prepare_shallow_info(&si, shallow);
1525 if (version == protocol_v2)
1526 ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
1527 pack_lockfile);
1528 else
1529 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1530 &si, pack_lockfile);
1531 reprepare_packed_git(the_repository);
1532 update_shallow(args, sought, nr_sought, &si);
1533 clear_shallow_info(&si);
1534 return ref_cpy;
1537 int report_unmatched_refs(struct ref **sought, int nr_sought)
1539 int i, ret = 0;
1541 for (i = 0; i < nr_sought; i++) {
1542 if (!sought[i])
1543 continue;
1544 switch (sought[i]->match_status) {
1545 case REF_MATCHED:
1546 continue;
1547 case REF_NOT_MATCHED:
1548 error(_("no such remote ref %s"), sought[i]->name);
1549 break;
1550 case REF_UNADVERTISED_NOT_ALLOWED:
1551 error(_("Server does not allow request for unadvertised object %s"),
1552 sought[i]->name);
1553 break;
1555 ret = 1;
1557 return ret;