fetch-pack: use ref adv. to prune "have" sent
[git.git] / fetch-pack.c
blob 806c40021f0d3602566f28344de4a4c08ffd649e
1 #include "cache.h"
2 #include "repository.h"
3 #include "config.h"
4 #include "lockfile.h"
5 #include "refs.h"
6 #include "pkt-line.h"
7 #include "commit.h"
8 #include "tag.h"
9 #include "exec-cmd.h"
10 #include "pack.h"
11 #include "sideband.h"
12 #include "fetch-pack.h"
13 #include "remote.h"
14 #include "run-command.h"
15 #include "connect.h"
16 #include "transport.h"
17 #include "version.h"
18 #include "prio-queue.h"
19 #include "sha1-array.h"
20 #include "oidset.h"
21 #include "packfile.h"
23 static int transfer_unpack_limit = -1;
24 static int fetch_unpack_limit = -1;
25 static int unpack_limit = 100;
26 static int prefer_ofs_delta = 1;
27 static int no_done;
28 static int deepen_since_ok;
29 static int deepen_not_ok;
30 static int fetch_fsck_objects = -1;
31 static int transfer_fsck_objects = -1;
32 static int agent_supported;
33 static int server_supports_filtering;
34 static struct lock_file shallow_lock;
35 static const char *alternate_shallow_file;
37 /* Remember to update object flag allocation in object.h */
38 #define COMPLETE (1U << 0)
39 #define COMMON (1U << 1)
40 #define COMMON_REF (1U << 2)
41 #define SEEN (1U << 3)
42 #define POPPED (1U << 4)
43 #define ALTERNATE (1U << 5)
45 static int marked;
47 /*
48 * After sending this many "have"s, if we do not get any new ACK, we
49 * give up traversing our history.
50 */
51 #define MAX_IN_VAIN 256
53 static struct prio_queue rev_list = { compare_commits_by_commit_date };
54 static int non_common_revs, multi_ack, use_sideband;
55 /* Allow specifying sha1 if it is a ref tip. */
56 #define ALLOW_TIP_SHA1 01
57 /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
58 #define ALLOW_REACHABLE_SHA1 02
59 static unsigned int allow_unadvertised_object_request;
61 __attribute__((format (printf, 2, 3)))
62 static inline void print_verbose(const struct fetch_pack_args *args,
63 const char *fmt, ...)
65 va_list params;
67 if (!args->verbose)
68 return;
70 va_start(params, fmt);
71 vfprintf(stderr, fmt, params);
72 va_end(params);
73 fputc('\n', stderr);
76 struct alternate_object_cache {
77 struct object **items;
78 size_t nr, alloc;
81 static void cache_one_alternate(const char *refname,
82 const struct object_id *oid,
83 void *vcache)
85 struct alternate_object_cache *cache = vcache;
86 struct object *obj = parse_object(oid);
88 if (!obj || (obj->flags & ALTERNATE))
89 return;
91 obj->flags |= ALTERNATE;
92 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
93 cache->items[cache->nr++] = obj;
96 static void for_each_cached_alternate(void (*cb)(struct object *))
98 static int initialized;
99 static struct alternate_object_cache cache;
100 size_t i;
102 if (!initialized) {
103 for_each_alternate_ref(cache_one_alternate, &cache);
104 initialized = 1;
107 for (i = 0; i < cache.nr; i++)
108 cb(cache.items[i]);
111 static void rev_list_push(struct commit *commit, int mark)
113 if (!(commit->object.flags & mark)) {
114 commit->object.flags |= mark;
116 if (parse_commit(commit))
117 return;
119 prio_queue_put(&rev_list, commit);
121 if (!(commit->object.flags & COMMON))
122 non_common_revs++;
126 static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
128 struct object *o = deref_tag(parse_object(oid), refname, 0);
130 if (o && o->type == OBJ_COMMIT)
131 rev_list_push((struct commit *)o, SEEN);
133 return 0;
136 static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
137 int flag, void *cb_data)
139 return rev_list_insert_ref(refname, oid);
142 static int clear_marks(const char *refname, const struct object_id *oid,
143 int flag, void *cb_data)
145 struct object *o = deref_tag(parse_object(oid), refname, 0);
147 if (o && o->type == OBJ_COMMIT)
148 clear_commit_marks((struct commit *)o,
149 COMMON | COMMON_REF | SEEN | POPPED);
150 return 0;
153 /*
154 This function marks a rev and its ancestors as common.
155 In some cases, it is desirable to mark only the ancestors (for example
156 when only the server does not yet know that they are common).
157 */
159 static void mark_common(struct commit *commit,
160 int ancestors_only, int dont_parse)
162 if (commit != NULL && !(commit->object.flags & COMMON)) {
163 struct object *o = (struct object *)commit;
165 if (!ancestors_only)
166 o->flags |= COMMON;
168 if (!(o->flags & SEEN))
169 rev_list_push(commit, SEEN);
170 else {
171 struct commit_list *parents;
173 if (!ancestors_only && !(o->flags & POPPED))
174 non_common_revs--;
175 if (!o->parsed && !dont_parse)
176 if (parse_commit(commit))
177 return;
179 for (parents = commit->parents;
180 parents;
181 parents = parents->next)
182 mark_common(parents->item, 0, dont_parse);
187 /*
188 Get the next rev to send, ignoring the common.
189 */
191 static const struct object_id *get_rev(void)
193 struct commit *commit = NULL;
195 while (commit == NULL) {
196 unsigned int mark;
197 struct commit_list *parents;
199 if (rev_list.nr == 0 || non_common_revs == 0)
200 return NULL;
202 commit = prio_queue_get(&rev_list);
203 parse_commit(commit);
204 parents = commit->parents;
206 commit->object.flags |= POPPED;
207 if (!(commit->object.flags & COMMON))
208 non_common_revs--;
210 if (commit->object.flags & COMMON) {
211 /* do not send "have", and ignore ancestors */
212 commit = NULL;
213 mark = COMMON | SEEN;
214 } else if (commit->object.flags & COMMON_REF)
215 /* send "have", and ignore ancestors */
216 mark = COMMON | SEEN;
217 else
218 /* send "have", also for its ancestors */
219 mark = SEEN;
221 while (parents) {
222 if (!(parents->item->object.flags & SEEN))
223 rev_list_push(parents->item, mark);
224 if (mark & COMMON)
225 mark_common(parents->item, 1, 0);
226 parents = parents->next;
230 return &commit->object.oid;
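/*
 * Editorial summary (not part of the upstream file): get_rev() pops the
 * newest commit from the rev_list priority queue. A commit already marked
 * COMMON produces no "have" and its ancestors are cut off; a COMMON_REF
 * tip produces a "have" but still stops the walk at its parents; anything
 * else produces a "have" and keeps walking its parents. find_common()
 * below calls this repeatedly until it returns NULL or the server signals
 * it has seen enough.
 */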
233 enum ack_type {
234 NAK = 0,
235 ACK,
236 ACK_continue,
237 ACK_common,
238 ACK_ready
241 static void consume_shallow_list(struct fetch_pack_args *args, int fd)
243 if (args->stateless_rpc && args->deepen) {
244 /* If we sent a depth we will get back "duplicate"
245 * shallow and unshallow commands every time there
246 * is a block of have lines exchanged.
247 */
248 char *line;
249 while ((line = packet_read_line(fd, NULL))) {
250 if (starts_with(line, "shallow "))
251 continue;
252 if (starts_with(line, "unshallow "))
253 continue;
254 die(_("git fetch-pack: expected shallow list"));
259 static enum ack_type get_ack(int fd, struct object_id *result_oid)
261 int len;
262 char *line = packet_read_line(fd, &len);
263 const char *arg;
265 if (!line)
266 die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
267 if (!strcmp(line, "NAK"))
268 return NAK;
269 if (skip_prefix(line, "ACK ", &arg)) {
270 if (!get_oid_hex(arg, result_oid)) {
271 arg += 40;
272 len -= arg - line;
273 if (len < 1)
274 return ACK;
275 if (strstr(arg, "continue"))
276 return ACK_continue;
277 if (strstr(arg, "common"))
278 return ACK_common;
279 if (strstr(arg, "ready"))
280 return ACK_ready;
281 return ACK;
284 if (skip_prefix(line, "ERR ", &arg))
285 die(_("remote error: %s"), arg);
286 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
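/*
 * For reference (editorial note; the object ids below are made up), the
 * pkt-lines this parser accepts look like:
 *
 *   NAK
 *   ACK 74730d410fcb6603ace96f1dc55ea6196122532d continue   (multi_ack)
 *   ACK 74730d410fcb6603ace96f1dc55ea6196122532d common     (multi_ack_detailed)
 *   ACK 74730d410fcb6603ace96f1dc55ea6196122532d ready      (multi_ack_detailed)
 *   ACK 74730d410fcb6603ace96f1dc55ea6196122532d            (final ACK)
 *   ERR upload-pack: not our ref ...
 *
 * Documentation/technical/pack-protocol.txt has the authoritative grammar.
 */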
289 static void send_request(struct fetch_pack_args *args,
290 int fd, struct strbuf *buf)
292 if (args->stateless_rpc) {
293 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
294 packet_flush(fd);
295 } else
296 write_or_die(fd, buf->buf, buf->len);
299 static void insert_one_alternate_object(struct object *obj)
301 rev_list_insert_ref(NULL, &obj->oid);
304 #define INITIAL_FLUSH 16
305 #define PIPESAFE_FLUSH 32
306 #define LARGE_FLUSH 16384
308 static int next_flush(int stateless_rpc, int count)
310 if (stateless_rpc) {
311 if (count < LARGE_FLUSH)
312 count <<= 1;
313 else
314 count = count * 11 / 10;
315 } else {
316 if (count < PIPESAFE_FLUSH)
317 count <<= 1;
318 else
319 count += PIPESAFE_FLUSH;
321 return count;
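/*
 * Worked example derived from the constants above (editorial note): over
 * a bidirectional connection the flush points for "have" lines double
 * from INITIAL_FLUSH (16) up to PIPESAFE_FLUSH (32) and then grow by 32
 * per round: 16, 32, 64, 96, 128, ...  Over stateless RPC the window
 * keeps doubling up to LARGE_FLUSH (16384) and then grows by roughly 10%
 * per round: ..., 8192, 16384, 18022, 19824, ...
 */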
324 static int find_common(struct fetch_pack_args *args,
325 int fd[2], struct object_id *result_oid,
326 struct ref *refs)
328 int fetching;
329 int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
330 const struct object_id *oid;
331 unsigned in_vain = 0;
332 int got_continue = 0;
333 int got_ready = 0;
334 struct strbuf req_buf = STRBUF_INIT;
335 size_t state_len = 0;
337 if (args->stateless_rpc && multi_ack == 1)
338 die(_("--stateless-rpc requires multi_ack_detailed"));
340 for_each_ref(rev_list_insert_ref_oid, NULL);
341 for_each_cached_alternate(insert_one_alternate_object);
343 fetching = 0;
344 for ( ; refs ; refs = refs->next) {
345 struct object_id *remote = &refs->old_oid;
346 const char *remote_hex;
347 struct object *o;
349 /*
350 * If that object is complete (i.e. it is an ancestor of a
351 * local ref), we tell them we have it but do not have to
352 * tell them about its ancestors, which they already know
353 * about.
354 *
355 * We use lookup_object here because we are only
356 * interested in the case we *know* the object is
357 * reachable and we have already scanned it.
358 */
359 if (((o = lookup_object(remote->hash)) != NULL) &&
360 (o->flags & COMPLETE)) {
361 continue;
364 remote_hex = oid_to_hex(remote);
365 if (!fetching) {
366 struct strbuf c = STRBUF_INIT;
367 if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
368 if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
369 if (no_done) strbuf_addstr(&c, " no-done");
370 if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
371 if (use_sideband == 1) strbuf_addstr(&c, " side-band");
372 if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
373 if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
374 if (args->no_progress) strbuf_addstr(&c, " no-progress");
375 if (args->include_tag) strbuf_addstr(&c, " include-tag");
376 if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
377 if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
378 if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
379 if (agent_supported) strbuf_addf(&c, " agent=%s",
380 git_user_agent_sanitized());
381 if (args->filter_options.choice)
382 strbuf_addstr(&c, " filter");
383 packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
384 strbuf_release(&c);
385 } else
386 packet_buf_write(&req_buf, "want %s\n", remote_hex);
387 fetching++;
390 if (!fetching) {
391 strbuf_release(&req_buf);
392 packet_flush(fd[1]);
393 return 1;
396 if (is_repository_shallow())
397 write_shallow_commits(&req_buf, 1, NULL);
398 if (args->depth > 0)
399 packet_buf_write(&req_buf, "deepen %d", args->depth);
400 if (args->deepen_since) {
401 timestamp_t max_age = approxidate(args->deepen_since);
402 packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
404 if (args->deepen_not) {
405 int i;
406 for (i = 0; i < args->deepen_not->nr; i++) {
407 struct string_list_item *s = args->deepen_not->items + i;
408 packet_buf_write(&req_buf, "deepen-not %s", s->string);
411 if (server_supports_filtering && args->filter_options.choice)
412 packet_buf_write(&req_buf, "filter %s",
413 args->filter_options.filter_spec);
414 packet_buf_flush(&req_buf);
415 state_len = req_buf.len;
417 if (args->deepen) {
418 char *line;
419 const char *arg;
420 struct object_id oid;
422 send_request(args, fd[1], &req_buf);
423 while ((line = packet_read_line(fd[0], NULL))) {
424 if (skip_prefix(line, "shallow ", &arg)) {
425 if (get_oid_hex(arg, &oid))
426 die(_("invalid shallow line: %s"), line);
427 register_shallow(&oid);
428 continue;
430 if (skip_prefix(line, "unshallow ", &arg)) {
431 if (get_oid_hex(arg, &oid))
432 die(_("invalid unshallow line: %s"), line);
433 if (!lookup_object(oid.hash))
434 die(_("object not found: %s"), line);
435 /* make sure that it is parsed as shallow */
436 if (!parse_object(&oid))
437 die(_("error in object: %s"), line);
438 if (unregister_shallow(&oid))
439 die(_("no shallow found: %s"), line);
440 continue;
442 die(_("expected shallow/unshallow, got %s"), line);
444 } else if (!args->stateless_rpc)
445 send_request(args, fd[1], &req_buf);
447 if (!args->stateless_rpc) {
448 /* If we aren't using the stateless-rpc interface
449 * we don't need to retain the headers.
450 */
451 strbuf_setlen(&req_buf, 0);
452 state_len = 0;
455 flushes = 0;
456 retval = -1;
457 if (args->no_dependents)
458 goto done;
459 while ((oid = get_rev())) {
460 packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
461 print_verbose(args, "have %s", oid_to_hex(oid));
462 in_vain++;
463 if (flush_at <= ++count) {
464 int ack;
466 packet_buf_flush(&req_buf);
467 send_request(args, fd[1], &req_buf);
468 strbuf_setlen(&req_buf, state_len);
469 flushes++;
470 flush_at = next_flush(args->stateless_rpc, count);
472 /*
473 * We keep one window "ahead" of the other side, and
474 * will wait for an ACK only on the next one.
475 */
476 if (!args->stateless_rpc && count == INITIAL_FLUSH)
477 continue;
479 consume_shallow_list(args, fd[0]);
480 do {
481 ack = get_ack(fd[0], result_oid);
482 if (ack)
483 print_verbose(args, _("got %s %d %s"), "ack",
484 ack, oid_to_hex(result_oid));
485 switch (ack) {
486 case ACK:
487 flushes = 0;
488 multi_ack = 0;
489 retval = 0;
490 goto done;
491 case ACK_common:
492 case ACK_ready:
493 case ACK_continue: {
494 struct commit *commit =
495 lookup_commit(result_oid);
496 if (!commit)
497 die(_("invalid commit %s"), oid_to_hex(result_oid));
498 if (args->stateless_rpc
499 && ack == ACK_common
500 && !(commit->object.flags & COMMON)) {
501 /* We need to replay the have for this object
502 * on the next RPC request so the peer knows
503 * it is in common with us.
504 */
505 const char *hex = oid_to_hex(result_oid);
506 packet_buf_write(&req_buf, "have %s\n", hex);
507 state_len = req_buf.len;
508 /*
509 * Reset in_vain because an ack
510 * for this commit has not been
511 * seen.
512 */
513 in_vain = 0;
514 } else if (!args->stateless_rpc
515 || ack != ACK_common)
516 in_vain = 0;
517 mark_common(commit, 0, 1);
518 retval = 0;
519 got_continue = 1;
520 if (ack == ACK_ready)
521 got_ready = 1;
522 break;
525 } while (ack);
526 flushes--;
527 if (got_continue && MAX_IN_VAIN < in_vain) {
528 print_verbose(args, _("giving up"));
529 break; /* give up */
531 if (got_ready)
532 break;
535 done:
536 if (!got_ready || !no_done) {
537 packet_buf_write(&req_buf, "done\n");
538 send_request(args, fd[1], &req_buf);
540 print_verbose(args, _("done"));
541 if (retval != 0) {
542 multi_ack = 0;
543 flushes++;
545 strbuf_release(&req_buf);
547 if (!got_ready || !no_done)
548 consume_shallow_list(args, fd[0]);
549 while (flushes || multi_ack) {
550 int ack = get_ack(fd[0], result_oid);
551 if (ack) {
552 print_verbose(args, _("got %s (%d) %s"), "ack",
553 ack, oid_to_hex(result_oid));
554 if (ack == ACK)
555 return 0;
556 multi_ack = 1;
557 continue;
559 flushes--;
561 /* it is no error to fetch into a completely empty repo */
562 return count ? retval : 0;
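/*
 * Editorial summary of the loop above: find_common() sends the "want"s
 * plus any shallow/deepen parameters, then streams batches of "have"s,
 * stopping at each flush point to read ACK/NAK responses. It gives up
 * when the server reports "ready", when more than MAX_IN_VAIN haves go
 * unacknowledged after the first common commit was found, or when local
 * history is exhausted.
 */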
565 static struct commit_list *complete;
567 static int mark_complete(const struct object_id *oid)
569 struct object *o = parse_object(oid);
571 while (o && o->type == OBJ_TAG) {
572 struct tag *t = (struct tag *) o;
573 if (!t->tagged)
574 break; /* broken repository */
575 o->flags |= COMPLETE;
576 o = parse_object(&t->tagged->oid);
578 if (o && o->type == OBJ_COMMIT) {
579 struct commit *commit = (struct commit *)o;
580 if (!(commit->object.flags & COMPLETE)) {
581 commit->object.flags |= COMPLETE;
582 commit_list_insert(commit, &complete);
585 return 0;
588 static int mark_complete_oid(const char *refname, const struct object_id *oid,
589 int flag, void *cb_data)
591 return mark_complete(oid);
594 static void mark_recent_complete_commits(struct fetch_pack_args *args,
595 timestamp_t cutoff)
597 while (complete && cutoff <= complete->item->date) {
598 print_verbose(args, _("Marking %s as complete"),
599 oid_to_hex(&complete->item->object.oid));
600 pop_most_recent_commit(&complete, COMPLETE);
604 static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
606 for (; refs; refs = refs->next)
607 oidset_insert(oids, &refs->old_oid);
610 static int tip_oids_contain(struct oidset *tip_oids,
611 struct ref *unmatched, struct ref *newlist,
612 const struct object_id *id)
614 /*
615 * Note that this only looks at the ref lists the first time it's
616 * called. This works out in filter_refs() because even though it may
617 * add to "newlist" between calls, the additions will always be for
618 * oids that are already in the set.
619 */
620 if (!tip_oids->map.map.tablesize) {
621 add_refs_to_oidset(tip_oids, unmatched);
622 add_refs_to_oidset(tip_oids, newlist);
624 return oidset_contains(tip_oids, id);
627 static void filter_refs(struct fetch_pack_args *args,
628 struct ref **refs,
629 struct ref **sought, int nr_sought)
631 struct ref *newlist = NULL;
632 struct ref **newtail = &newlist;
633 struct ref *unmatched = NULL;
634 struct ref *ref, *next;
635 struct oidset tip_oids = OIDSET_INIT;
636 int i;
638 i = 0;
639 for (ref = *refs; ref; ref = next) {
640 int keep = 0;
641 next = ref->next;
643 if (starts_with(ref->name, "refs/") &&
644 check_refname_format(ref->name, 0))
645 ; /* trash */
646 else {
647 while (i < nr_sought) {
648 int cmp = strcmp(ref->name, sought[i]->name);
649 if (cmp < 0)
650 break; /* definitely do not have it */
651 else if (cmp == 0) {
652 keep = 1; /* definitely have it */
653 sought[i]->match_status = REF_MATCHED;
655 i++;
659 if (!keep && args->fetch_all &&
660 (!args->deepen || !starts_with(ref->name, "refs/tags/")))
661 keep = 1;
663 if (keep) {
664 *newtail = ref;
665 ref->next = NULL;
666 newtail = &ref->next;
667 } else {
668 ref->next = unmatched;
669 unmatched = ref;
673 /* Append unmatched requests to the list */
674 for (i = 0; i < nr_sought; i++) {
675 struct object_id oid;
676 const char *p;
678 ref = sought[i];
679 if (ref->match_status != REF_NOT_MATCHED)
680 continue;
681 if (parse_oid_hex(ref->name, &oid, &p) ||
682 *p != '\0' ||
683 oidcmp(&oid, &ref->old_oid))
684 continue;
686 if ((allow_unadvertised_object_request &
687 (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
688 tip_oids_contain(&tip_oids, unmatched, newlist,
689 &ref->old_oid)) {
690 ref->match_status = REF_MATCHED;
691 *newtail = copy_ref(ref);
692 newtail = &(*newtail)->next;
693 } else {
694 ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
698 oidset_clear(&tip_oids);
699 for (ref = unmatched; ref; ref = next) {
700 next = ref->next;
701 free(ref);
704 *refs = newlist;
707 static void mark_alternate_complete(struct object *obj)
709 mark_complete(&obj->oid);
712 struct loose_object_iter {
713 struct oidset *loose_object_set;
714 struct ref *refs;
717 /*
718 * If the number of refs is not larger than the number of loose objects,
719 * this function stops inserting.
720 */
721 static int add_loose_objects_to_set(const struct object_id *oid,
722 const char *path,
723 void *data)
725 struct loose_object_iter *iter = data;
726 oidset_insert(iter->loose_object_set, oid);
727 if (iter->refs == NULL)
728 return 1;
730 iter->refs = iter->refs->next;
731 return 0;
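/*
 * Editorial note on the heuristic above: the callback walks iter->refs in
 * lock-step with the loose-object enumeration. If the ref list runs out
 * first it returns 1, for_each_loose_object() stops early, and the caller
 * leaves use_oidset at 0; otherwise every loose object id lands in the
 * set and ref tips can be tested against it cheaply.
 */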
734 /*
735 * Mark recent commits available locally and reachable from a local ref as
736 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
737 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
738 * thus do not need COMMON_REF marks).
739 *
740 * The cutoff time for recency is determined by this heuristic: it is the
741 * earliest commit time of the objects in refs that are commits and that we know
742 * the commit time of.
743 */
744 static void mark_complete_and_common_ref(struct fetch_pack_args *args,
745 struct ref **refs)
747 struct ref *ref;
748 int old_save_commit_buffer = save_commit_buffer;
749 timestamp_t cutoff = 0;
750 struct oidset loose_oid_set = OIDSET_INIT;
751 int use_oidset = 0;
752 struct loose_object_iter iter = {&loose_oid_set, *refs};
754 /* Enumerate all loose objects or know refs are not so many. */
755 use_oidset = !for_each_loose_object(add_loose_objects_to_set,
756 &iter, 0);
758 save_commit_buffer = 0;
760 for (ref = *refs; ref; ref = ref->next) {
761 struct object *o;
762 unsigned int flags = OBJECT_INFO_QUICK;
764 if (use_oidset &&
765 !oidset_contains(&loose_oid_set, &ref->old_oid)) {
766 /*
767 * I know this does not exist in the loose form,
768 * so check if it exists in a non-loose form.
769 */
770 flags |= OBJECT_INFO_IGNORE_LOOSE;
773 if (!has_object_file_with_flags(&ref->old_oid, flags))
774 continue;
775 o = parse_object(&ref->old_oid);
776 if (!o)
777 continue;
779 /* We already have it -- which may mean that we were
780 * in sync with the other side at some time after
781 * that (it is OK if we guess wrong here).
782 */
783 if (o->type == OBJ_COMMIT) {
784 struct commit *commit = (struct commit *)o;
785 if (!cutoff || cutoff < commit->date)
786 cutoff = commit->date;
790 oidset_clear(&loose_oid_set);
792 if (!args->no_dependents) {
793 if (!args->deepen) {
794 for_each_ref(mark_complete_oid, NULL);
795 for_each_cached_alternate(mark_alternate_complete);
796 commit_list_sort_by_date(&complete);
797 if (cutoff)
798 mark_recent_complete_commits(args, cutoff);
801 /*
802 * Mark all complete remote refs as common refs.
803 * Don't mark them common yet; the server has to be told so first.
804 */
805 for (ref = *refs; ref; ref = ref->next) {
806 struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
807 NULL, 0);
809 if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
810 continue;
812 if (!(o->flags & SEEN)) {
813 rev_list_push((struct commit *)o, COMMON_REF | SEEN);
815 mark_common((struct commit *)o, 1, 1);
820 save_commit_buffer = old_save_commit_buffer;
823 /*
824 * Returns 1 if every object pointed to by the given remote refs is available
825 * locally and reachable from a local ref, and 0 otherwise.
826 */
827 static int everything_local(struct fetch_pack_args *args,
828 struct ref **refs)
830 struct ref *ref;
831 int retval;
833 for (retval = 1, ref = *refs; ref ; ref = ref->next) {
834 const struct object_id *remote = &ref->old_oid;
835 struct object *o;
837 o = lookup_object(remote->hash);
838 if (!o || !(o->flags & COMPLETE)) {
839 retval = 0;
840 print_verbose(args, "want %s (%s)", oid_to_hex(remote),
841 ref->name);
842 continue;
844 print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
845 ref->name);
848 return retval;
851 static int sideband_demux(int in, int out, void *data)
853 int *xd = data;
854 int ret;
856 ret = recv_sideband("fetch-pack", xd[0], out);
857 close(out);
858 return ret;
861 static int get_pack(struct fetch_pack_args *args,
862 int xd[2], char **pack_lockfile)
864 struct async demux;
865 int do_keep = args->keep_pack;
866 const char *cmd_name;
867 struct pack_header header;
868 int pass_header = 0;
869 struct child_process cmd = CHILD_PROCESS_INIT;
870 int ret;
872 memset(&demux, 0, sizeof(demux));
873 if (use_sideband) {
874 /* xd[] is talking with upload-pack; subprocess reads from
875 * xd[0], spits out band#2 to stderr, and feeds us band#1
876 * through demux->out.
877 */
878 demux.proc = sideband_demux;
879 demux.data = xd;
880 demux.out = -1;
881 demux.isolate_sigpipe = 1;
882 if (start_async(&demux))
883 die(_("fetch-pack: unable to fork off sideband demultiplexer"));
885 else
886 demux.out = xd[0];
888 if (!args->keep_pack && unpack_limit) {
890 if (read_pack_header(demux.out, &header))
891 die(_("protocol error: bad pack header"));
892 pass_header = 1;
893 if (ntohl(header.hdr_entries) < unpack_limit)
894 do_keep = 0;
895 else
896 do_keep = 1;
899 if (alternate_shallow_file) {
900 argv_array_push(&cmd.args, "--shallow-file");
901 argv_array_push(&cmd.args, alternate_shallow_file);
904 if (do_keep || args->from_promisor) {
905 if (pack_lockfile)
906 cmd.out = -1;
907 cmd_name = "index-pack";
908 argv_array_push(&cmd.args, cmd_name);
909 argv_array_push(&cmd.args, "--stdin");
910 if (!args->quiet && !args->no_progress)
911 argv_array_push(&cmd.args, "-v");
912 if (args->use_thin_pack)
913 argv_array_push(&cmd.args, "--fix-thin");
914 if (do_keep && (args->lock_pack || unpack_limit)) {
915 char hostname[HOST_NAME_MAX + 1];
916 if (xgethostname(hostname, sizeof(hostname)))
917 xsnprintf(hostname, sizeof(hostname), "localhost");
918 argv_array_pushf(&cmd.args,
919 "--keep=fetch-pack %"PRIuMAX " on %s",
920 (uintmax_t)getpid(), hostname);
922 if (args->check_self_contained_and_connected)
923 argv_array_push(&cmd.args, "--check-self-contained-and-connected");
924 if (args->from_promisor)
925 argv_array_push(&cmd.args, "--promisor");
927 else {
928 cmd_name = "unpack-objects";
929 argv_array_push(&cmd.args, cmd_name);
930 if (args->quiet || args->no_progress)
931 argv_array_push(&cmd.args, "-q");
932 args->check_self_contained_and_connected = 0;
935 if (pass_header)
936 argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
937 ntohl(header.hdr_version),
938 ntohl(header.hdr_entries));
939 if (fetch_fsck_objects >= 0
940 ? fetch_fsck_objects
941 : transfer_fsck_objects >= 0
942 ? transfer_fsck_objects
943 : 0) {
944 if (args->from_promisor)
945 /*
946 * We cannot use --strict in index-pack because it
947 * checks both broken objects and links, but we only
948 * want to check for broken objects.
949 */
950 argv_array_push(&cmd.args, "--fsck-objects");
951 else
952 argv_array_push(&cmd.args, "--strict");
955 cmd.in = demux.out;
956 cmd.git_cmd = 1;
957 if (start_command(&cmd))
958 die(_("fetch-pack: unable to fork off %s"), cmd_name);
959 if (do_keep && pack_lockfile) {
960 *pack_lockfile = index_pack_lockfile(cmd.out);
961 close(cmd.out);
964 if (!use_sideband)
965 /* Closed by start_command() */
966 xd[0] = -1;
968 ret = finish_command(&cmd);
969 if (!ret || (args->check_self_contained_and_connected && ret == 1))
970 args->self_contained_and_connected =
971 args->check_self_contained_and_connected &&
972 ret == 0;
973 else
974 die(_("%s failed"), cmd_name);
975 if (use_sideband && finish_async(&demux))
976 die(_("error in sideband demultiplexer"));
977 return 0;
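/*
 * Rough decision table for the child process above (editorial summary):
 * packs with fewer than unpack_limit objects (default 100) are piped to
 * "unpack-objects" and exploded into loose objects; larger packs, --keep
 * fetches and promisor packs go through "index-pack --stdin", optionally
 * with --keep=..., --fix-thin and fsck flags, and the resulting .keep
 * lockfile name is read back via index_pack_lockfile().
 */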
980 static int cmp_ref_by_name(const void *a_, const void *b_)
982 const struct ref *a = *((const struct ref **)a_);
983 const struct ref *b = *((const struct ref **)b_);
984 return strcmp(a->name, b->name);
987 static struct ref *do_fetch_pack(struct fetch_pack_args *args,
988 int fd[2],
989 const struct ref *orig_ref,
990 struct ref **sought, int nr_sought,
991 struct shallow_info *si,
992 char **pack_lockfile)
994 struct ref *ref = copy_ref_list(orig_ref);
995 struct object_id oid;
996 const char *agent_feature;
997 int agent_len;
999 sort_ref_list(&ref, ref_compare_name);
1000 QSORT(sought, nr_sought, cmp_ref_by_name);
1002 if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
1003 die(_("Server does not support shallow clients"));
1004 if (args->depth > 0 || args->deepen_since || args->deepen_not)
1005 args->deepen = 1;
1006 if (server_supports("multi_ack_detailed")) {
1007 print_verbose(args, _("Server supports multi_ack_detailed"));
1008 multi_ack = 2;
1009 if (server_supports("no-done")) {
1010 print_verbose(args, _("Server supports no-done"));
1011 if (args->stateless_rpc)
1012 no_done = 1;
1015 else if (server_supports("multi_ack")) {
1016 print_verbose(args, _("Server supports multi_ack"));
1017 multi_ack = 1;
1019 if (server_supports("side-band-64k")) {
1020 print_verbose(args, _("Server supports side-band-64k"));
1021 use_sideband = 2;
1023 else if (server_supports("side-band")) {
1024 print_verbose(args, _("Server supports side-band"));
1025 use_sideband = 1;
1027 if (server_supports("allow-tip-sha1-in-want")) {
1028 print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
1029 allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
1031 if (server_supports("allow-reachable-sha1-in-want")) {
1032 print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
1033 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
1035 if (!server_supports("thin-pack"))
1036 args->use_thin_pack = 0;
1037 if (!server_supports("no-progress"))
1038 args->no_progress = 0;
1039 if (!server_supports("include-tag"))
1040 args->include_tag = 0;
1041 if (server_supports("ofs-delta"))
1042 print_verbose(args, _("Server supports ofs-delta"));
1043 else
1044 prefer_ofs_delta = 0;
1046 if (server_supports("filter")) {
1047 server_supports_filtering = 1;
1048 print_verbose(args, _("Server supports filter"));
1049 } else if (args->filter_options.choice) {
1050 warning("filtering not recognized by server, ignoring");
1053 if ((agent_feature = server_feature_value("agent", &agent_len))) {
1054 agent_supported = 1;
1055 if (agent_len)
1056 print_verbose(args, _("Server version is %.*s"),
1057 agent_len, agent_feature);
1059 if (server_supports("deepen-since"))
1060 deepen_since_ok = 1;
1061 else if (args->deepen_since)
1062 die(_("Server does not support --shallow-since"));
1063 if (server_supports("deepen-not"))
1064 deepen_not_ok = 1;
1065 else if (args->deepen_not)
1066 die(_("Server does not support --shallow-exclude"));
1067 if (!server_supports("deepen-relative") && args->deepen_relative)
1068 die(_("Server does not support --deepen"));
1070 if (marked)
1071 for_each_ref(clear_marks, NULL);
1072 marked = 1;
1073 mark_complete_and_common_ref(args, &ref);
1074 filter_refs(args, &ref, sought, nr_sought);
1075 if (everything_local(args, &ref)) {
1076 packet_flush(fd[1]);
1077 goto all_done;
1079 if (find_common(args, fd, &oid, ref) < 0)
1080 if (!args->keep_pack)
1081 /* When cloning, it is not unusual to have
1082 * no common commit.
1083 */
1084 warning(_("no common commits"));
1086 if (args->stateless_rpc)
1087 packet_flush(fd[1]);
1088 if (args->deepen)
1089 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
1090 NULL);
1091 else if (si->nr_ours || si->nr_theirs)
1092 alternate_shallow_file = setup_temporary_shallow(si->shallow);
1093 else
1094 alternate_shallow_file = NULL;
1095 if (get_pack(args, fd, pack_lockfile))
1096 die(_("git fetch-pack: fetch failed."));
1098 all_done:
1099 clear_prio_queue(&rev_list);
1100 return ref;
1103 static void add_shallow_requests(struct strbuf *req_buf,
1104 const struct fetch_pack_args *args)
1106 if (is_repository_shallow())
1107 write_shallow_commits(req_buf, 1, NULL);
1108 if (args->depth > 0)
1109 packet_buf_write(req_buf, "deepen %d", args->depth);
1110 if (args->deepen_since) {
1111 timestamp_t max_age = approxidate(args->deepen_since);
1112 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1114 if (args->deepen_not) {
1115 int i;
1116 for (i = 0; i < args->deepen_not->nr; i++) {
1117 struct string_list_item *s = args->deepen_not->items + i;
1118 packet_buf_write(req_buf, "deepen-not %s", s->string);
1123 static void add_wants(const struct ref *wants, struct strbuf *req_buf)
1125 for ( ; wants ; wants = wants->next) {
1126 const struct object_id *remote = &wants->old_oid;
1127 const char *remote_hex;
1128 struct object *o;
1130 /*
1131 * If that object is complete (i.e. it is an ancestor of a
1132 * local ref), we tell them we have it but do not have to
1133 * tell them about its ancestors, which they already know
1134 * about.
1135 *
1136 * We use lookup_object here because we are only
1137 * interested in the case we *know* the object is
1138 * reachable and we have already scanned it.
1139 */
1140 if (((o = lookup_object(remote->hash)) != NULL) &&
1141 (o->flags & COMPLETE)) {
1142 continue;
1145 remote_hex = oid_to_hex(remote);
1146 packet_buf_write(req_buf, "want %s\n", remote_hex);
1150 static void add_common(struct strbuf *req_buf, struct oidset *common)
1152 struct oidset_iter iter;
1153 const struct object_id *oid;
1154 oidset_iter_init(common, &iter);
1156 while ((oid = oidset_iter_next(&iter))) {
1157 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1161 static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
1163 int ret = 0;
1164 int haves_added = 0;
1165 const struct object_id *oid;
1167 while ((oid = get_rev())) {
1168 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1169 if (++haves_added >= *haves_to_send)
1170 break;
1173 *in_vain += haves_added;
1174 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1175 /* Send Done */
1176 packet_buf_write(req_buf, "done\n");
1177 ret = 1;
1180 /* Increase haves to send on next round */
1181 *haves_to_send = next_flush(1, *haves_to_send);
1183 return ret;
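/*
 * Illustrative numbers (editorial note): with haves_to_send starting at
 * INITIAL_FLUSH (16) and next_flush() called with stateless_rpc = 1,
 * successive v2 requests carry at most 16, 32, 64, ... "have" lines.
 * Once a round adds no haves at all, or *in_vain reaches MAX_IN_VAIN
 * (256), "done" is written and the function returns 1.
 */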
1186 static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
1187 const struct ref *wants, struct oidset *common,
1188 int *haves_to_send, int *in_vain)
1190 int ret = 0;
1191 struct strbuf req_buf = STRBUF_INIT;
1193 if (server_supports_v2("fetch", 1))
1194 packet_buf_write(&req_buf, "command=fetch");
1195 if (server_supports_v2("agent", 0))
1196 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1197 if (args->server_options && args->server_options->nr &&
1198 server_supports_v2("server-option", 1)) {
1199 int i;
1200 for (i = 0; i < args->server_options->nr; i++)
1201 packet_write_fmt(fd_out, "server-option=%s",
1202 args->server_options->items[i].string);
1205 packet_buf_delim(&req_buf);
1206 if (args->use_thin_pack)
1207 packet_buf_write(&req_buf, "thin-pack");
1208 if (args->no_progress)
1209 packet_buf_write(&req_buf, "no-progress");
1210 if (args->include_tag)
1211 packet_buf_write(&req_buf, "include-tag");
1212 if (prefer_ofs_delta)
1213 packet_buf_write(&req_buf, "ofs-delta");
1215 /* Add shallow-info and deepen request */
1216 if (server_supports_feature("fetch", "shallow", 0))
1217 add_shallow_requests(&req_buf, args);
1218 else if (is_repository_shallow() || args->deepen)
1219 die(_("Server does not support shallow requests"));
1221 /* Add filter */
1222 if (server_supports_feature("fetch", "filter", 0) &&
1223 args->filter_options.choice) {
1224 print_verbose(args, _("Server supports filter"));
1225 packet_buf_write(&req_buf, "filter %s",
1226 args->filter_options.filter_spec);
1227 } else if (args->filter_options.choice) {
1228 warning("filtering not recognized by server, ignoring");
1231 /* add wants */
1232 add_wants(wants, &req_buf);
1234 if (args->no_dependents) {
1235 packet_buf_write(&req_buf, "done");
1236 ret = 1;
1237 } else {
1238 /* Add all of the common commits we've found in previous rounds */
1239 add_common(&req_buf, common);
1241 /* Add initial haves */
1242 ret = add_haves(&req_buf, haves_to_send, in_vain);
1245 /* Send request */
1246 packet_buf_flush(&req_buf);
1247 write_or_die(fd_out, req_buf.buf, req_buf.len);
1249 strbuf_release(&req_buf);
1250 return ret;
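/*
 * Sketch of the resulting protocol v2 request (editorial illustration;
 * object ids and agent string are made up):
 *
 *   command=fetch
 *   agent=git/2.18.0
 *   0001                      (delim-pkt)
 *   thin-pack
 *   ofs-delta
 *   want 74730d410fcb6603ace96f1dc55ea6196122532d
 *   have 7e47fe2b3a3f9f6d8c1e0a5b4c3d2e1f00112233
 *   done                      (only once negotiation is finished)
 *   0000                      (flush-pkt)
 *
 * Documentation/technical/protocol-v2.txt describes the exact grammar.
 */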
1253 /*
1254 * Processes a section header in a server's response and checks if it matches
1255 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1256 * not consumed); if 0, the line will be consumed and the function will die if
1257 * the section header doesn't match what was expected.
1258 */
1259 static int process_section_header(struct packet_reader *reader,
1260 const char *section, int peek)
1262 int ret;
1264 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1265 die("error reading section header '%s'", section);
1267 ret = !strcmp(reader->line, section);
1269 if (!peek) {
1270 if (!ret)
1271 die("expected '%s', received '%s'",
1272 section, reader->line);
1273 packet_reader_read(reader);
1276 return ret;
1279 static int process_acks(struct packet_reader *reader, struct oidset *common)
1281 /* received */
1282 int received_ready = 0;
1283 int received_ack = 0;
1285 process_section_header(reader, "acknowledgments", 0);
1286 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1287 const char *arg;
1289 if (!strcmp(reader->line, "NAK"))
1290 continue;
1292 if (skip_prefix(reader->line, "ACK ", &arg)) {
1293 struct object_id oid;
1294 if (!get_oid_hex(arg, &oid)) {
1295 struct commit *commit;
1296 oidset_insert(common, &oid);
1297 commit = lookup_commit(&oid);
1298 mark_common(commit, 0, 1);
1300 continue;
1303 if (!strcmp(reader->line, "ready")) {
1304 received_ready = 1;
1305 continue;
1308 die("unexpected acknowledgment line: '%s'", reader->line);
1311 if (reader->status != PACKET_READ_FLUSH &&
1312 reader->status != PACKET_READ_DELIM)
1313 die("error processing acks: %d", reader->status);
1315 /* return 0 if no common, 1 if there are common, or 2 if ready */
1316 return received_ready ? 2 : (received_ack ? 1 : 0);
1319 static void receive_shallow_info(struct fetch_pack_args *args,
1320 struct packet_reader *reader)
1322 process_section_header(reader, "shallow-info", 0);
1323 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1324 const char *arg;
1325 struct object_id oid;
1327 if (skip_prefix(reader->line, "shallow ", &arg)) {
1328 if (get_oid_hex(arg, &oid))
1329 die(_("invalid shallow line: %s"), reader->line);
1330 register_shallow(&oid);
1331 continue;
1333 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1334 if (get_oid_hex(arg, &oid))
1335 die(_("invalid unshallow line: %s"), reader->line);
1336 if (!lookup_object(oid.hash))
1337 die(_("object not found: %s"), reader->line);
1338 /* make sure that it is parsed as shallow */
1339 if (!parse_object(&oid))
1340 die(_("error in object: %s"), reader->line);
1341 if (unregister_shallow(&oid))
1342 die(_("no shallow found: %s"), reader->line);
1343 continue;
1345 die(_("expected shallow/unshallow, got %s"), reader->line);
1348 if (reader->status != PACKET_READ_FLUSH &&
1349 reader->status != PACKET_READ_DELIM)
1350 die("error processing shallow info: %d", reader->status);
1352 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
1353 args->deepen = 1;
1356 enum fetch_state {
1357 FETCH_CHECK_LOCAL = 0,
1358 FETCH_SEND_REQUEST,
1359 FETCH_PROCESS_ACKS,
1360 FETCH_GET_PACK,
1361 FETCH_DONE,
1364 static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
1365 int fd[2],
1366 const struct ref *orig_ref,
1367 struct ref **sought, int nr_sought,
1368 char **pack_lockfile)
1370 struct ref *ref = copy_ref_list(orig_ref);
1371 enum fetch_state state = FETCH_CHECK_LOCAL;
1372 struct oidset common = OIDSET_INIT;
1373 struct packet_reader reader;
1374 int in_vain = 0;
1375 int haves_to_send = INITIAL_FLUSH;
1376 packet_reader_init(&reader, fd[0], NULL, 0,
1377 PACKET_READ_CHOMP_NEWLINE);
1379 while (state != FETCH_DONE) {
1380 switch (state) {
1381 case FETCH_CHECK_LOCAL:
1382 sort_ref_list(&ref, ref_compare_name);
1383 QSORT(sought, nr_sought, cmp_ref_by_name);
1385 /* v2 supports these by default */
1386 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
1387 use_sideband = 2;
1388 if (args->depth > 0 || args->deepen_since || args->deepen_not)
1389 args->deepen = 1;
1391 if (marked)
1392 for_each_ref(clear_marks, NULL);
1393 marked = 1;
1395 /* Filter 'ref' by 'sought' and those that aren't local */
1396 mark_complete_and_common_ref(args, &ref);
1397 filter_refs(args, &ref, sought, nr_sought);
1398 if (everything_local(args, &ref))
1399 state = FETCH_DONE;
1400 else
1401 state = FETCH_SEND_REQUEST;
1403 for_each_ref(rev_list_insert_ref_oid, NULL);
1404 for_each_cached_alternate(insert_one_alternate_object);
1405 break;
1406 case FETCH_SEND_REQUEST:
1407 if (send_fetch_request(fd[1], args, ref, &common,
1408 &haves_to_send, &in_vain))
1409 state = FETCH_GET_PACK;
1410 else
1411 state = FETCH_PROCESS_ACKS;
1412 break;
1413 case FETCH_PROCESS_ACKS:
1414 /* Process ACKs/NAKs */
1415 switch (process_acks(&reader, &common)) {
1416 case 2:
1417 state = FETCH_GET_PACK;
1418 break;
1419 case 1:
1420 in_vain = 0;
1421 /* fallthrough */
1422 default:
1423 state = FETCH_SEND_REQUEST;
1424 break;
1426 break;
1427 case FETCH_GET_PACK:
1428 /* Check for shallow-info section */
1429 if (process_section_header(&reader, "shallow-info", 1))
1430 receive_shallow_info(args, &reader);
1432 /* get the pack */
1433 process_section_header(&reader, "packfile", 0);
1434 if (get_pack(args, fd, pack_lockfile))
1435 die(_("git fetch-pack: fetch failed."));
1437 state = FETCH_DONE;
1438 break;
1439 case FETCH_DONE:
1440 continue;
1444 clear_prio_queue(&rev_list);
1445 oidset_clear(&common);
1446 return ref;
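/*
 * State machine overview (editorial summary of the v2 loop above):
 *
 *   FETCH_CHECK_LOCAL  -> everything already local?   -> FETCH_DONE
 *                      -> otherwise                   -> FETCH_SEND_REQUEST
 *   FETCH_SEND_REQUEST -> request ended with "done"   -> FETCH_GET_PACK
 *                      -> otherwise                   -> FETCH_PROCESS_ACKS
 *   FETCH_PROCESS_ACKS -> server sent "ready"         -> FETCH_GET_PACK
 *                      -> otherwise                   -> FETCH_SEND_REQUEST
 *   FETCH_GET_PACK     -> pack received and indexed   -> FETCH_DONE
 */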
1449 static void fetch_pack_config(void)
1451 git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
1452 git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
1453 git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
1454 git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
1455 git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
1457 git_config(git_default_config, NULL);
1460 static void fetch_pack_setup(void)
1462 static int did_setup;
1463 if (did_setup)
1464 return;
1465 fetch_pack_config();
1466 if (0 <= transfer_unpack_limit)
1467 unpack_limit = transfer_unpack_limit;
1468 else if (0 <= fetch_unpack_limit)
1469 unpack_limit = fetch_unpack_limit;
1470 did_setup = 1;
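/*
 * Editorial note on precedence, as implemented above: transfer.unpacklimit,
 * when set, wins over fetch.unpacklimit; with neither set the built-in
 * default of 100 objects applies. For example, "git config
 * transfer.unpacklimit 1" makes every fetched pack be kept as a pack file
 * instead of being exploded into loose objects.
 */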
1473 static int remove_duplicates_in_refs(struct ref **ref, int nr)
1475 struct string_list names = STRING_LIST_INIT_NODUP;
1476 int src, dst;
1478 for (src = dst = 0; src < nr; src++) {
1479 struct string_list_item *item;
1480 item = string_list_insert(&names, ref[src]->name);
1481 if (item->util)
1482 continue; /* already have it */
1483 item->util = ref[src];
1484 if (src != dst)
1485 ref[dst] = ref[src];
1486 dst++;
1488 for (src = dst; src < nr; src++)
1489 ref[src] = NULL;
1490 string_list_clear(&names, 0);
1491 return dst;
1494 static void update_shallow(struct fetch_pack_args *args,
1495 struct ref **sought, int nr_sought,
1496 struct shallow_info *si)
1498 struct oid_array ref = OID_ARRAY_INIT;
1499 int *status;
1500 int i;
1502 if (args->deepen && alternate_shallow_file) {
1503 if (*alternate_shallow_file == '\0') { /* --unshallow */
1504 unlink_or_warn(git_path_shallow());
1505 rollback_lock_file(&shallow_lock);
1506 } else
1507 commit_lock_file(&shallow_lock);
1508 return;
1511 if (!si->shallow || !si->shallow->nr)
1512 return;
1514 if (args->cloning) {
1515 /*
1516 * remote is shallow, but this is a clone, there are
1517 * no objects in repo to worry about. Accept any
1518 * shallow points that exist in the pack (iow in repo
1519 * after get_pack() and reprepare_packed_git())
1520 */
1521 struct oid_array extra = OID_ARRAY_INIT;
1522 struct object_id *oid = si->shallow->oid;
1523 for (i = 0; i < si->shallow->nr; i++)
1524 if (has_object_file(&oid[i]))
1525 oid_array_append(&extra, &oid[i]);
1526 if (extra.nr) {
1527 setup_alternate_shallow(&shallow_lock,
1528 &alternate_shallow_file,
1529 &extra);
1530 commit_lock_file(&shallow_lock);
1532 oid_array_clear(&extra);
1533 return;
1536 if (!si->nr_ours && !si->nr_theirs)
1537 return;
1539 remove_nonexistent_theirs_shallow(si);
1540 if (!si->nr_ours && !si->nr_theirs)
1541 return;
1542 for (i = 0; i < nr_sought; i++)
1543 oid_array_append(&ref, &sought[i]->old_oid);
1544 si->ref = &ref;
1546 if (args->update_shallow) {
1547 /*
1548 * remote is also shallow, .git/shallow may be updated
1549 * so all refs can be accepted. Make sure we only add
1550 * shallow roots that are actually reachable from new
1551 * refs.
1552 */
1553 struct oid_array extra = OID_ARRAY_INIT;
1554 struct object_id *oid = si->shallow->oid;
1555 assign_shallow_commits_to_refs(si, NULL, NULL);
1556 if (!si->nr_ours && !si->nr_theirs) {
1557 oid_array_clear(&ref);
1558 return;
1560 for (i = 0; i < si->nr_ours; i++)
1561 oid_array_append(&extra, &oid[si->ours[i]]);
1562 for (i = 0; i < si->nr_theirs; i++)
1563 oid_array_append(&extra, &oid[si->theirs[i]]);
1564 setup_alternate_shallow(&shallow_lock,
1565 &alternate_shallow_file,
1566 &extra);
1567 commit_lock_file(&shallow_lock);
1568 oid_array_clear(&extra);
1569 oid_array_clear(&ref);
1570 return;
1573 /*
1574 * remote is also shallow, check what ref is safe to update
1575 * without updating .git/shallow
1576 */
1577 status = xcalloc(nr_sought, sizeof(*status));
1578 assign_shallow_commits_to_refs(si, NULL, status);
1579 if (si->nr_ours || si->nr_theirs) {
1580 for (i = 0; i < nr_sought; i++)
1581 if (status[i])
1582 sought[i]->status = REF_STATUS_REJECT_SHALLOW;
1584 free(status);
1585 oid_array_clear(&ref);
1588 struct ref *fetch_pack(struct fetch_pack_args *args,
1589 int fd[], struct child_process *conn,
1590 const struct ref *ref,
1591 const char *dest,
1592 struct ref **sought, int nr_sought,
1593 struct oid_array *shallow,
1594 char **pack_lockfile,
1595 enum protocol_version version)
1597 struct ref *ref_cpy;
1598 struct shallow_info si;
1600 fetch_pack_setup();
1601 if (nr_sought)
1602 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1604 if (!ref) {
1605 packet_flush(fd[1]);
1606 die(_("no matching remote head"));
1608 prepare_shallow_info(&si, shallow);
1609 if (version == protocol_v2)
1610 ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
1611 pack_lockfile);
1612 else
1613 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1614 &si, pack_lockfile);
1615 reprepare_packed_git(the_repository);
1616 update_shallow(args, sought, nr_sought, &si);
1617 clear_shallow_info(&si);
1618 return ref_cpy;
1621 int report_unmatched_refs(struct ref **sought, int nr_sought)
1623 int i, ret = 0;
1625 for (i = 0; i < nr_sought; i++) {
1626 if (!sought[i])
1627 continue;
1628 switch (sought[i]->match_status) {
1629 case REF_MATCHED:
1630 continue;
1631 case REF_NOT_MATCHED:
1632 error(_("no such remote ref %s"), sought[i]->name);
1633 break;
1634 case REF_UNADVERTISED_NOT_ALLOWED:
1635 error(_("Server does not allow request for unadvertised object %s"),
1636 sought[i]->name);
1637 break;
1639 ret = 1;
1641 return ret;