#include "cache.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)

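/*
 * Rough guide to how these flags appear to be used below (an informal
 * reading of the code, not an authoritative definition): COMPLETE marks
 * objects already reachable from our local refs, COMMON and COMMON_REF
 * mark commits the other side is known to have (or is about to be told
 * we have), SEEN and POPPED track the rev_list walk driven by get_rev()
 * and mark_common(), and ALTERNATE marks objects advertised by alternate
 * object stores (see cache_one_alternate()).
 */
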
static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1		01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;

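/*
 * Illustration: these bits end up set in allow_unadvertised_object_request
 * when the server advertises the "allow-tip-sha1-in-want" or
 * "allow-reachable-sha1-in-want" capability (see do_fetch_pack() below).
 * filter_refs() then lets a request name such an object directly by its
 * hex object id, along the lines of
 *
 *	git fetch-pack <url> <40-hex-object-id>
 *
 * even though that object was never advertised as a ref tip.
 */
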
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}

struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}

static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
   This function marks a rev and its ancestors as common.
   In some cases, it is desirable to mark only the ancestors (for example
   when only the server does not yet know that they are common).
*/
static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
  Get the next rev to send, ignoring the common.
*/
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}

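/*
 * Note: rev_list is a prio_queue ordered by compare_commits_by_commit_date,
 * so get_rev() hands back candidate "have"s roughly newest-first, walking
 * our history from the ref tips backwards until only revs already known
 * to be common remain.
 */
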
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

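/*
 * Sketch of the pkt-lines that get_ack() below maps onto these values
 * (the suffixed forms are only used once multi_ack or multi_ack_detailed
 * has been negotiated):
 *
 *	"NAK"			-> NAK
 *	"ACK <oid>"		-> ACK		(final acknowledgement)
 *	"ACK <oid> continue"	-> ACK_continue
 *	"ACK <oid> common"	-> ACK_common
 *	"ACK <oid> ready"	-> ACK_ready	(server can build the pack now)
 */
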
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}

static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

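/*
 * Worked out, the flush schedule this yields (total number of "have"s
 * sent before each flush) is:
 *
 *	bidirectional:	16, 32, 64, 96, 128, ...
 *			(doubling until PIPESAFE_FLUSH, then +PIPESAFE_FLUSH)
 *	stateless-rpc:	16, 32, 64, ..., 16384, then roughly +10% per round
 *			(doubling until LARGE_FLUSH, then count * 11 / 10)
 *
 * so the number of negotiation rounds grows only slowly with the amount
 * of history that has to be advertised.
 */
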
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

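/*
 * A minimal sketch of the exchange find_common() drives (pkt-lines; the
 * capability list and the exact ACK sequence depend on what the server
 * advertised and on the local history):
 *
 *	C: want <oid1> multi_ack_detailed side-band-64k thin-pack ofs-delta ...
 *	C: want <oid2>
 *	C: <flush>
 *	C: have <oidA>
 *	C: have <oidB>
 *	C: <flush>
 *	S: ACK <oidA> common
 *	S: NAK
 *	C: have <oidC>
 *	C: <flush>
 *	S: ACK <oidC> ready
 *	C: done
 *	S: ACK <oidC>
 *
 * after which the packfile follows, possibly multiplexed onto sideband
 * channel #1 (see get_pack() below).
 */
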
static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}

static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}

static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}

struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};

/*
 *  If the number of refs is not larger than the number of loose objects,
 *  this function stops inserting.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
				    const char *path,
				    void *data)
{
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
		return 1;

	iter->refs = iter->refs->next;
	return 0;
}

static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects or know refs are not so many. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	save_commit_buffer = old_save_commit_buffer;

	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

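/*
 * For illustration (the exact flags depend on the arguments and config),
 * the child process assembled above typically amounts to either
 *
 *	git index-pack --stdin -v --fix-thin --keep=fetch-pack <pid> on <host>
 *
 * when the pack is kept as a pack, or
 *
 *	git unpack-objects -q
 *
 * when it holds fewer objects than unpack_limit and is exploded into
 * loose objects instead.
 */
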
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

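/*
 * Example of the configuration consulted here (values are made up):
 *
 *	[fetch]
 *		unpackLimit = 100
 *		fsckObjects = true
 *	[transfer]
 *		unpackLimit = 100
 *		fsckObjects = false
 *	[repack]
 *		useDeltaBaseOffset = true
 *
 * fetch.fsckObjects, when set, wins over transfer.fsckObjects (see the
 * ternary in get_pack() above); the two unpackLimit settings are
 * reconciled in fetch_pack_setup() below.
 */
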
static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}

int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}