fetch-pack: split up everything_local()
[git.git] / fetch-pack.c
blob 5c87bb8bb80ddc95f8a657df9c5d003b38291b98
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec-cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)

static int marked;
/*
 * After sending this many "have"s if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
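/*
 * Negotiation state for the "have" exchange: rev_list is a priority
 * queue of candidate commits to advertise (ordered by commit date,
 * newest first), and non_common_revs counts how many of them are not
 * yet known to be in common with the server.
 */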
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}

static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}
static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}
/*
   This function marks a rev and its ancestors as common.
   In some cases, it is desirable to mark only the ancestors (for example
   when only the server does not yet know that they are common).
*/

static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
/*
  Get the next rev to send, ignoring the common.
*/

static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}
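/*
 * ACK variants sent by the server under the multi_ack and
 * multi_ack_detailed capabilities; a plain ACK ends the negotiation.
 */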
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
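/*
 * Schedule of how many "have"s to send between flushes: start small,
 * then grow the window so that large histories do not cost a round
 * trip per handful of commits (see next_flush() below).
 */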
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}
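/*
 * Send "want" lines (with capabilities and any deepen request) for the
 * refs we still need, then negotiate a common base by streaming "have"
 * lines until the server ACKs.  Returns 0 if a common base was found
 * (or nothing had to be negotiated), 1 if every remote ref was already
 * complete locally, and -1 if no common commit was found.
 */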
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
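/*
 * Commits pointed to by local refs (and peeled tags), collected by
 * mark_complete(); their recent ancestry is marked COMPLETE by
 * mark_recent_complete_commits() down to the cutoff time.
 */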
static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
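/*
 * Keep only the advertised refs that were asked for (or all of them
 * with fetch_all), moving the rest to an "unmatched" list.  Requests
 * for raw object ids are then appended when the server allows
 * unadvertised objects or the id matches an advertised tip.
 */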
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}

struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};

/*
 *  If the number of refs is not larger than the number of loose objects,
 *  this function stops inserting.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
				    const char *path,
				    void *data)
{
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
		return 1;

	iter->refs = iter->refs->next;
	return 0;
}
/*
 * Mark recent commits available locally and reachable from a local ref as
 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
 * thus do not need COMMON_REF marks).
 *
 * The cutoff time for recency is determined by this heuristic: it is the
 * earliest commit time of the objects in refs that are commits and that we know
 * the commit time of.
 */
static void mark_complete_and_common_ref(struct fetch_pack_args *args,
					 struct ref **refs)
{
	struct ref *ref;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects or know refs are not so many. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	save_commit_buffer = old_save_commit_buffer;
}
/*
 * Returns 1 if every object pointed to by the given remote refs is available
 * locally and reachable from a local ref, and 0 otherwise.
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs)
{
	struct ref *ref;
	int retval;

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
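/*
 * Receive the packfile, demultiplexing sideband traffic if it is in
 * use, and feed it to index-pack or unpack-objects depending on the
 * keep-pack settings and the number of objects in the pack header.
 */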
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
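/*
 * Protocol v0/v1 driver: parse the capability advertisement, mark what
 * is already complete locally, negotiate with find_common(), and
 * download the pack if anything is missing.
 */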
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	mark_complete_and_common_ref(args, &ref);
	filter_refs(args, &ref, sought, nr_sought);
	if (everything_local(args, &ref)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
static void add_shallow_requests(struct strbuf *req_buf,
				 const struct fetch_pack_args *args)
{
	if (is_repository_shallow())
		write_shallow_commits(req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(req_buf, "deepen-not %s", s->string);
		}
	}
}

static void add_wants(const struct ref *wants, struct strbuf *req_buf)
{
	for ( ; wants ; wants = wants->next) {
		const struct object_id *remote = &wants->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		packet_buf_write(req_buf, "want %s\n", remote_hex);
	}
}

static void add_common(struct strbuf *req_buf, struct oidset *common)
{
	struct oidset_iter iter;
	const struct object_id *oid;
	oidset_iter_init(common, &iter);

	while ((oid = oidset_iter_next(&iter))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
	}
}
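/*
 * Add up to *haves_to_send "have" lines to the request.  Returns 1 once
 * it is time to send "done" (nothing left to advertise, or MAX_IN_VAIN
 * haves went unacknowledged), 0 otherwise.
 */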
static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
{
	int ret = 0;
	int haves_added = 0;
	const struct object_id *oid;

	while ((oid = get_rev())) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
		if (++haves_added >= *haves_to_send)
			break;
	}

	*in_vain += haves_added;
	if (!haves_added || *in_vain >= MAX_IN_VAIN) {
		/* Send Done */
		packet_buf_write(req_buf, "done\n");
		ret = 1;
	}

	/* Increase haves to send on next round */
	*haves_to_send = next_flush(1, *haves_to_send);

	return ret;
}
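/*
 * Write one protocol v2 "fetch" request: capabilities, shallow and
 * filter arguments, the wants, then the previously discovered common
 * commits and a fresh batch of haves.  Returns 1 if "done" was sent,
 * meaning the packfile should follow.
 */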
static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain)
{
	int ret = 0;
	struct strbuf req_buf = STRBUF_INIT;

	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		int i;
		for (i = 0; i < args->server_options->nr; i++)
			packet_write_fmt(fd_out, "server-option=%s",
					 args->server_options->items[i].string);
	}

	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");

	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow() || args->deepen)
		die(_("Server does not support shallow requests"));

	/* Add filter */
	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	/* add wants */
	add_wants(wants, &req_buf);

	if (args->no_dependents) {
		packet_buf_write(&req_buf, "done");
		ret = 1;
	} else {
		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);

		/* Add initial haves */
		ret = add_haves(&req_buf, haves_to_send, in_vain);
	}

	/* Send request */
	packet_buf_flush(&req_buf);
	write_or_die(fd_out, req_buf.buf, req_buf.len);

	strbuf_release(&req_buf);
	return ret;
}
/*
 * Processes a section header in a server's response and checks if it matches
 * `section`. If the value of `peek` is 1, the header line will be peeked (and
 * not consumed); if 0, the line will be consumed and the function will die if
 * the section header doesn't match what was expected.
 */
static int process_section_header(struct packet_reader *reader,
				  const char *section, int peek)
{
	int ret;

	if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
		die("error reading section header '%s'", section);

	ret = !strcmp(reader->line, section);

	if (!peek) {
		if (!ret)
			die("expected '%s', received '%s'",
			    section, reader->line);
		packet_reader_read(reader);
	}

	return ret;
}

static int process_acks(struct packet_reader *reader, struct oidset *common)
{
	/* received */
	int received_ready = 0;
	int received_ack = 0;

	process_section_header(reader, "acknowledgments", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;

		if (!strcmp(reader->line, "NAK"))
			continue;

		if (skip_prefix(reader->line, "ACK ", &arg)) {
			struct object_id oid;
			if (!get_oid_hex(arg, &oid)) {
				struct commit *commit;
				oidset_insert(common, &oid);
				commit = lookup_commit(&oid);
				mark_common(commit, 0, 1);
			}
			continue;
		}

		if (!strcmp(reader->line, "ready")) {
			clear_prio_queue(&rev_list);
			received_ready = 1;
			continue;
		}

		die("unexpected acknowledgment line: '%s'", reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die("error processing acks: %d", reader->status);

	/* return 0 if no common, 1 if there are common, or 2 if ready */
	return received_ready ? 2 : (received_ack ? 1 : 0);
}
static void receive_shallow_info(struct fetch_pack_args *args,
				 struct packet_reader *reader)
{
	process_section_header(reader, "shallow-info", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;
		struct object_id oid;

		if (skip_prefix(reader->line, "shallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid shallow line: %s"), reader->line);
			register_shallow(&oid);
			continue;
		}
		if (skip_prefix(reader->line, "unshallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid unshallow line: %s"), reader->line);
			if (!lookup_object(oid.hash))
				die(_("object not found: %s"), reader->line);
			/* make sure that it is parsed as shallow */
			if (!parse_object(&oid))
				die(_("error in object: %s"), reader->line);
			if (unregister_shallow(&oid))
				die(_("no shallow found: %s"), reader->line);
			continue;
		}
		die(_("expected shallow/unshallow, got %s"), reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die("error processing shallow info: %d", reader->status);

	setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
	args->deepen = 1;
}
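/*
 * States of the protocol v2 fetch, driven by the loop in
 * do_fetch_pack_v2() below.
 */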
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,
	FETCH_SEND_REQUEST,
	FETCH_PROCESS_ACKS,
	FETCH_GET_PACK,
	FETCH_DONE,
};

static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;
	int haves_to_send = INITIAL_FLUSH;
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			if (marked)
				for_each_ref(clear_marks, NULL);
			marked = 1;

			for_each_ref(rev_list_insert_ref_oid, NULL);
			for_each_cached_alternate(insert_one_alternate_object);

			/* Filter 'ref' by 'sought' and those that aren't local */
			mark_complete_and_common_ref(args, &ref);
			filter_refs(args, &ref, sought, nr_sought);
			if (everything_local(args, &ref))
				state = FETCH_DONE;
			else
				state = FETCH_SEND_REQUEST;
			break;
		case FETCH_SEND_REQUEST:
			if (send_fetch_request(fd[1], args, ref, &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&reader, &common)) {
			case 2:
				state = FETCH_GET_PACK;
				break;
			case 1:
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	oidset_clear(&common);
	return ref;
}
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
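/*
 * Update .git/shallow (or the temporary shallow file) after the fetch,
 * handling --unshallow, clones from shallow remotes, and
 * --update-shallow.
 */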
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
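/*
 * Public entry point: negotiate and download a pack over the given file
 * descriptors, dispatching to the protocol v2 or the v0/v1
 * implementation.
 */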
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	if (version == protocol_v2)
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   pack_lockfile);
	else
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	reprepare_packed_git(the_repository);
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}

int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}