sha1_file: support lazily fetching missing objects
[git.git] / fetch-pack.c
blob 0798e0b8b28538963c7dcd1d3f0bc50350ed531c
1 #include "cache.h"
2 #include "config.h"
3 #include "lockfile.h"
4 #include "refs.h"
5 #include "pkt-line.h"
6 #include "commit.h"
7 #include "tag.h"
8 #include "exec_cmd.h"
9 #include "pack.h"
10 #include "sideband.h"
11 #include "fetch-pack.h"
12 #include "remote.h"
13 #include "run-command.h"
14 #include "connect.h"
15 #include "transport.h"
16 #include "version.h"
17 #include "prio-queue.h"
18 #include "sha1-array.h"
19 #include "oidset.h"
20 #include "packfile.h"
22 static int transfer_unpack_limit = -1;
23 static int fetch_unpack_limit = -1;
24 static int unpack_limit = 100;
25 static int prefer_ofs_delta = 1;
26 static int no_done;
27 static int deepen_since_ok;
28 static int deepen_not_ok;
29 static int fetch_fsck_objects = -1;
30 static int transfer_fsck_objects = -1;
31 static int agent_supported;
32 static struct lock_file shallow_lock;
33 static const char *alternate_shallow_file;
35 /* Remember to update object flag allocation in object.h */
36 #define COMPLETE (1U << 0)
37 #define COMMON (1U << 1)
38 #define COMMON_REF (1U << 2)
39 #define SEEN (1U << 3)
40 #define POPPED (1U << 4)
41 #define ALTERNATE (1U << 5)
43 static int marked;
45 /*
46 * After sending this many "have"s, if we do not get any new ACK, we
47 * give up traversing our history.
48 */
49 #define MAX_IN_VAIN 256
51 static struct prio_queue rev_list = { compare_commits_by_commit_date };
52 static int non_common_revs, multi_ack, use_sideband;
53 /* Allow specifying sha1 if it is a ref tip. */
54 #define ALLOW_TIP_SHA1 01
55 /* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
56 #define ALLOW_REACHABLE_SHA1 02
57 static unsigned int allow_unadvertised_object_request;
59 __attribute__((format (printf, 2, 3)))
60 static inline void print_verbose(const struct fetch_pack_args *args,
61 const char *fmt, ...)
63 va_list params;
65 if (!args->verbose)
66 return;
68 va_start(params, fmt);
69 vfprintf(stderr, fmt, params);
70 va_end(params);
71 fputc('\n', stderr);
74 struct alternate_object_cache {
75 struct object **items;
76 size_t nr, alloc;
79 static void cache_one_alternate(const char *refname,
80 const struct object_id *oid,
81 void *vcache)
83 struct alternate_object_cache *cache = vcache;
84 struct object *obj = parse_object(oid);
86 if (!obj || (obj->flags & ALTERNATE))
87 return;
89 obj->flags |= ALTERNATE;
90 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
91 cache->items[cache->nr++] = obj;
94 static void for_each_cached_alternate(void (*cb)(struct object *))
96 static int initialized;
97 static struct alternate_object_cache cache;
98 size_t i;
100 if (!initialized) {
101 for_each_alternate_ref(cache_one_alternate, &cache);
102 initialized = 1;
105 for (i = 0; i < cache.nr; i++)
106 cb(cache.items[i]);
109 static void rev_list_push(struct commit *commit, int mark)
111 if (!(commit->object.flags & mark)) {
112 commit->object.flags |= mark;
114 if (parse_commit(commit))
115 return;
117 prio_queue_put(&rev_list, commit);
119 if (!(commit->object.flags & COMMON))
120 non_common_revs++;
124 static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
126 struct object *o = deref_tag(parse_object(oid), refname, 0);
128 if (o && o->type == OBJ_COMMIT)
129 rev_list_push((struct commit *)o, SEEN);
131 return 0;
134 static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
135 int flag, void *cb_data)
137 return rev_list_insert_ref(refname, oid);
140 static int clear_marks(const char *refname, const struct object_id *oid,
141 int flag, void *cb_data)
143 struct object *o = deref_tag(parse_object(oid), refname, 0);
145 if (o && o->type == OBJ_COMMIT)
146 clear_commit_marks((struct commit *)o,
147 COMMON | COMMON_REF | SEEN | POPPED);
148 return 0;
151 /*
152 This function marks a rev and its ancestors as common.
153 In some cases, it is desirable to mark only the ancestors (for example
154 when only the server does not yet know that they are common).
155 */
157 static void mark_common(struct commit *commit,
158 int ancestors_only, int dont_parse)
160 if (commit != NULL && !(commit->object.flags & COMMON)) {
161 struct object *o = (struct object *)commit;
163 if (!ancestors_only)
164 o->flags |= COMMON;
166 if (!(o->flags & SEEN))
167 rev_list_push(commit, SEEN);
168 else {
169 struct commit_list *parents;
171 if (!ancestors_only && !(o->flags & POPPED))
172 non_common_revs--;
173 if (!o->parsed && !dont_parse)
174 if (parse_commit(commit))
175 return;
177 for (parents = commit->parents;
178 parents;
179 parents = parents->next)
180 mark_common(parents->item, 0, dont_parse);
185 /*
186 Get the next rev to send, ignoring the common.
187 */
189 static const struct object_id *get_rev(void)
191 struct commit *commit = NULL;
193 while (commit == NULL) {
194 unsigned int mark;
195 struct commit_list *parents;
197 if (rev_list.nr == 0 || non_common_revs == 0)
198 return NULL;
200 commit = prio_queue_get(&rev_list);
201 parse_commit(commit);
202 parents = commit->parents;
204 commit->object.flags |= POPPED;
205 if (!(commit->object.flags & COMMON))
206 non_common_revs--;
208 if (commit->object.flags & COMMON) {
209 /* do not send "have", and ignore ancestors */
210 commit = NULL;
211 mark = COMMON | SEEN;
212 } else if (commit->object.flags & COMMON_REF)
213 /* send "have", and ignore ancestors */
214 mark = COMMON | SEEN;
215 else
216 /* send "have", also for its ancestors */
217 mark = SEEN;
219 while (parents) {
220 if (!(parents->item->object.flags & SEEN))
221 rev_list_push(parents->item, mark);
222 if (mark & COMMON)
223 mark_common(parents->item, 1, 0);
224 parents = parents->next;
228 return &commit->object.oid;
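/*
* Kinds of response the server sends to our "have" lines: a bare ACK
* ends the negotiation, while the continue/common/ready variants are
* used by the multi_ack extensions to keep the exchange going.
*/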
231 enum ack_type {
232 NAK = 0,
233 ACK,
234 ACK_continue,
235 ACK_common,
236 ACK_ready
239 static void consume_shallow_list(struct fetch_pack_args *args, int fd)
241 if (args->stateless_rpc && args->deepen) {
242 /* If we sent a depth we will get back "duplicate"
243 * shallow and unshallow commands every time there
244 * is a block of have lines exchanged.
245 */
246 char *line;
247 while ((line = packet_read_line(fd, NULL))) {
248 if (starts_with(line, "shallow "))
249 continue;
250 if (starts_with(line, "unshallow "))
251 continue;
252 die(_("git fetch-pack: expected shallow list"));
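/*
* Read one negotiation response: "NAK", "ACK <oid>" optionally followed
* by continue/common/ready, or "ERR <msg>", which aborts with the
* server's message.
*/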
257 static enum ack_type get_ack(int fd, struct object_id *result_oid)
259 int len;
260 char *line = packet_read_line(fd, &len);
261 const char *arg;
263 if (!len)
264 die(_("git fetch-pack: expected ACK/NAK, got EOF"));
265 if (!strcmp(line, "NAK"))
266 return NAK;
267 if (skip_prefix(line, "ACK ", &arg)) {
268 if (!get_oid_hex(arg, result_oid)) {
269 arg += 40;
270 len -= arg - line;
271 if (len < 1)
272 return ACK;
273 if (strstr(arg, "continue"))
274 return ACK_continue;
275 if (strstr(arg, "common"))
276 return ACK_common;
277 if (strstr(arg, "ready"))
278 return ACK_ready;
279 return ACK;
282 if (skip_prefix(line, "ERR ", &arg))
283 die(_("remote error: %s"), arg);
284 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
287 static void send_request(struct fetch_pack_args *args,
288 int fd, struct strbuf *buf)
290 if (args->stateless_rpc) {
291 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
292 packet_flush(fd);
293 } else
294 write_or_die(fd, buf->buf, buf->len);
297 static void insert_one_alternate_object(struct object *obj)
299 rev_list_insert_ref(NULL, &obj->oid);
302 #define INITIAL_FLUSH 16
303 #define PIPESAFE_FLUSH 32
304 #define LARGE_FLUSH 16384
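/*
* Size of the next batch of "have" lines: double the window while it is
* small, then grow it by PIPESAFE_FLUSH at a time on a bidirectional
* connection, or by roughly 10% per round once it reaches LARGE_FLUSH
* when every batch is a separate stateless-rpc request.
*/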
306 static int next_flush(struct fetch_pack_args *args, int count)
308 if (args->stateless_rpc) {
309 if (count < LARGE_FLUSH)
310 count <<= 1;
311 else
312 count = count * 11 / 10;
313 } else {
314 if (count < PIPESAFE_FLUSH)
315 count <<= 1;
316 else
317 count += PIPESAFE_FLUSH;
319 return count;
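/*
* Send the "want" lines (the first carries our capability list) plus any
* shallow/deepen parameters, then negotiate a common history with the
* server by exchanging "have" lines and ACKs.
*/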
322 static int find_common(struct fetch_pack_args *args,
323 int fd[2], struct object_id *result_oid,
324 struct ref *refs)
326 int fetching;
327 int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
328 const struct object_id *oid;
329 unsigned in_vain = 0;
330 int got_continue = 0;
331 int got_ready = 0;
332 struct strbuf req_buf = STRBUF_INIT;
333 size_t state_len = 0;
335 if (args->stateless_rpc && multi_ack == 1)
336 die(_("--stateless-rpc requires multi_ack_detailed"));
337 if (marked)
338 for_each_ref(clear_marks, NULL);
339 marked = 1;
341 for_each_ref(rev_list_insert_ref_oid, NULL);
342 for_each_cached_alternate(insert_one_alternate_object);
344 fetching = 0;
345 for ( ; refs ; refs = refs->next) {
346 struct object_id *remote = &refs->old_oid;
347 const char *remote_hex;
348 struct object *o;
350 /*
351 * If that object is complete (i.e. it is an ancestor of a
352 * local ref), we tell them we have it but do not have to
353 * tell them about its ancestors, which they already know
354 * about.
355 *
356 * We use lookup_object here because we are only
357 * interested in the case we *know* the object is
358 * reachable and we have already scanned it.
359 */
360 if (((o = lookup_object(remote->hash)) != NULL) &&
361 (o->flags & COMPLETE)) {
362 continue;
365 remote_hex = oid_to_hex(remote);
366 if (!fetching) {
367 struct strbuf c = STRBUF_INIT;
368 if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
369 if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
370 if (no_done) strbuf_addstr(&c, " no-done");
371 if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
372 if (use_sideband == 1) strbuf_addstr(&c, " side-band");
373 if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
374 if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
375 if (args->no_progress) strbuf_addstr(&c, " no-progress");
376 if (args->include_tag) strbuf_addstr(&c, " include-tag");
377 if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
378 if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
379 if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
380 if (agent_supported) strbuf_addf(&c, " agent=%s",
381 git_user_agent_sanitized());
382 packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
383 strbuf_release(&c);
384 } else
385 packet_buf_write(&req_buf, "want %s\n", remote_hex);
386 fetching++;
389 if (!fetching) {
390 strbuf_release(&req_buf);
391 packet_flush(fd[1]);
392 return 1;
395 if (is_repository_shallow())
396 write_shallow_commits(&req_buf, 1, NULL);
397 if (args->depth > 0)
398 packet_buf_write(&req_buf, "deepen %d", args->depth);
399 if (args->deepen_since) {
400 timestamp_t max_age = approxidate(args->deepen_since);
401 packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
403 if (args->deepen_not) {
404 int i;
405 for (i = 0; i < args->deepen_not->nr; i++) {
406 struct string_list_item *s = args->deepen_not->items + i;
407 packet_buf_write(&req_buf, "deepen-not %s", s->string);
410 packet_buf_flush(&req_buf);
411 state_len = req_buf.len;
413 if (args->deepen) {
414 char *line;
415 const char *arg;
416 struct object_id oid;
418 send_request(args, fd[1], &req_buf);
419 while ((line = packet_read_line(fd[0], NULL))) {
420 if (skip_prefix(line, "shallow ", &arg)) {
421 if (get_oid_hex(arg, &oid))
422 die(_("invalid shallow line: %s"), line);
423 register_shallow(&oid);
424 continue;
426 if (skip_prefix(line, "unshallow ", &arg)) {
427 if (get_oid_hex(arg, &oid))
428 die(_("invalid unshallow line: %s"), line);
429 if (!lookup_object(oid.hash))
430 die(_("object not found: %s"), line);
431 /* make sure that it is parsed as shallow */
432 if (!parse_object(&oid))
433 die(_("error in object: %s"), line);
434 if (unregister_shallow(&oid))
435 die(_("no shallow found: %s"), line);
436 continue;
438 die(_("expected shallow/unshallow, got %s"), line);
440 } else if (!args->stateless_rpc)
441 send_request(args, fd[1], &req_buf);
443 if (!args->stateless_rpc) {
444 /* If we aren't using the stateless-rpc interface
445 * we don't need to retain the headers.
446 */
447 strbuf_setlen(&req_buf, 0);
448 state_len = 0;
451 flushes = 0;
452 retval = -1;
453 if (args->no_dependents)
454 goto done;
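/*
* Walk our history from the ref tips, sending "have" lines in batches
* sized by next_flush() and reading the server's ACKs after each flush.
* Stop on a final ACK, when the server says "ready", or (once something
* has been ACKed) after MAX_IN_VAIN haves go unacknowledged.
*/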
455 while ((oid = get_rev())) {
456 packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
457 print_verbose(args, "have %s", oid_to_hex(oid));
458 in_vain++;
459 if (flush_at <= ++count) {
460 int ack;
462 packet_buf_flush(&req_buf);
463 send_request(args, fd[1], &req_buf);
464 strbuf_setlen(&req_buf, state_len);
465 flushes++;
466 flush_at = next_flush(args, count);
468 /*
469 * We keep one window "ahead" of the other side, and
470 * will wait for an ACK only on the next one.
471 */
472 if (!args->stateless_rpc && count == INITIAL_FLUSH)
473 continue;
475 consume_shallow_list(args, fd[0]);
476 do {
477 ack = get_ack(fd[0], result_oid);
478 if (ack)
479 print_verbose(args, _("got %s %d %s"), "ack",
480 ack, oid_to_hex(result_oid));
481 switch (ack) {
482 case ACK:
483 flushes = 0;
484 multi_ack = 0;
485 retval = 0;
486 goto done;
487 case ACK_common:
488 case ACK_ready:
489 case ACK_continue: {
490 struct commit *commit =
491 lookup_commit(result_oid);
492 if (!commit)
493 die(_("invalid commit %s"), oid_to_hex(result_oid));
494 if (args->stateless_rpc
495 && ack == ACK_common
496 && !(commit->object.flags & COMMON)) {
497 /* We need to replay the have for this object
498 * on the next RPC request so the peer knows
499 * it is in common with us.
500 */
501 const char *hex = oid_to_hex(result_oid);
502 packet_buf_write(&req_buf, "have %s\n", hex);
503 state_len = req_buf.len;
504 /*
505 * Reset in_vain because an ack
506 * for this commit has not been
507 * seen.
508 */
509 in_vain = 0;
510 } else if (!args->stateless_rpc
511 || ack != ACK_common)
512 in_vain = 0;
513 mark_common(commit, 0, 1);
514 retval = 0;
515 got_continue = 1;
516 if (ack == ACK_ready) {
517 clear_prio_queue(&rev_list);
518 got_ready = 1;
520 break;
523 } while (ack);
524 flushes--;
525 if (got_continue && MAX_IN_VAIN < in_vain) {
526 print_verbose(args, _("giving up"));
527 break; /* give up */
531 done:
532 if (!got_ready || !no_done) {
533 packet_buf_write(&req_buf, "done\n");
534 send_request(args, fd[1], &req_buf);
536 print_verbose(args, _("done"));
537 if (retval != 0) {
538 multi_ack = 0;
539 flushes++;
541 strbuf_release(&req_buf);
543 if (!got_ready || !no_done)
544 consume_shallow_list(args, fd[0]);
545 while (flushes || multi_ack) {
546 int ack = get_ack(fd[0], result_oid);
547 if (ack) {
548 print_verbose(args, _("got %s (%d) %s"), "ack",
549 ack, oid_to_hex(result_oid));
550 if (ack == ACK)
551 return 0;
552 multi_ack = 1;
553 continue;
555 flushes--;
557 /* it is no error to fetch into a completely empty repo */
558 return count ? retval : 0;
561 static struct commit_list *complete;
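/*
* Mark a locally reachable tip as COMPLETE: peel tags down to the object
* they point at and, if that is a commit, remember it on the "complete"
* list so mark_recent_complete_commits() can flag its recent ancestors
* as complete too.
*/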
563 static int mark_complete(const struct object_id *oid)
565 struct object *o = parse_object(oid);
567 while (o && o->type == OBJ_TAG) {
568 struct tag *t = (struct tag *) o;
569 if (!t->tagged)
570 break; /* broken repository */
571 o->flags |= COMPLETE;
572 o = parse_object(&t->tagged->oid);
574 if (o && o->type == OBJ_COMMIT) {
575 struct commit *commit = (struct commit *)o;
576 if (!(commit->object.flags & COMPLETE)) {
577 commit->object.flags |= COMPLETE;
578 commit_list_insert(commit, &complete);
581 return 0;
584 static int mark_complete_oid(const char *refname, const struct object_id *oid,
585 int flag, void *cb_data)
587 return mark_complete(oid);
590 static void mark_recent_complete_commits(struct fetch_pack_args *args,
591 timestamp_t cutoff)
593 while (complete && cutoff <= complete->item->date) {
594 print_verbose(args, _("Marking %s as complete"),
595 oid_to_hex(&complete->item->object.oid));
596 pop_most_recent_commit(&complete, COMPLETE);
600 static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
602 for (; refs; refs = refs->next)
603 oidset_insert(oids, &refs->old_oid);
606 static int tip_oids_contain(struct oidset *tip_oids,
607 struct ref *unmatched, struct ref *newlist,
608 const struct object_id *id)
610 /*
611 * Note that this only looks at the ref lists the first time it's
612 * called. This works out in filter_refs() because even though it may
613 * add to "newlist" between calls, the additions will always be for
614 * oids that are already in the set.
615 */
616 if (!tip_oids->map.map.tablesize) {
617 add_refs_to_oidset(tip_oids, unmatched);
618 add_refs_to_oidset(tip_oids, newlist);
620 return oidset_contains(tip_oids, id);
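/*
* Reduce the advertised refs to the ones we were asked for.  Named refs
* matching "sought" (or everything, with fetch_all) are kept; a request
* for a raw object id is kept only if the server allows unadvertised
* objects or the id is one of the advertised tips.
*/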
623 static void filter_refs(struct fetch_pack_args *args,
624 struct ref **refs,
625 struct ref **sought, int nr_sought)
627 struct ref *newlist = NULL;
628 struct ref **newtail = &newlist;
629 struct ref *unmatched = NULL;
630 struct ref *ref, *next;
631 struct oidset tip_oids = OIDSET_INIT;
632 int i;
634 i = 0;
635 for (ref = *refs; ref; ref = next) {
636 int keep = 0;
637 next = ref->next;
639 if (starts_with(ref->name, "refs/") &&
640 check_refname_format(ref->name, 0))
641 ; /* trash */
642 else {
643 while (i < nr_sought) {
644 int cmp = strcmp(ref->name, sought[i]->name);
645 if (cmp < 0)
646 break; /* definitely do not have it */
647 else if (cmp == 0) {
648 keep = 1; /* definitely have it */
649 sought[i]->match_status = REF_MATCHED;
651 i++;
655 if (!keep && args->fetch_all &&
656 (!args->deepen || !starts_with(ref->name, "refs/tags/")))
657 keep = 1;
659 if (keep) {
660 *newtail = ref;
661 ref->next = NULL;
662 newtail = &ref->next;
663 } else {
664 ref->next = unmatched;
665 unmatched = ref;
669 /* Append unmatched requests to the list */
670 for (i = 0; i < nr_sought; i++) {
671 struct object_id oid;
672 const char *p;
674 ref = sought[i];
675 if (ref->match_status != REF_NOT_MATCHED)
676 continue;
677 if (parse_oid_hex(ref->name, &oid, &p) ||
678 *p != '\0' ||
679 oidcmp(&oid, &ref->old_oid))
680 continue;
682 if ((allow_unadvertised_object_request &
683 (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
684 tip_oids_contain(&tip_oids, unmatched, newlist,
685 &ref->old_oid)) {
686 ref->match_status = REF_MATCHED;
687 *newtail = copy_ref(ref);
688 newtail = &(*newtail)->next;
689 } else {
690 ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
694 oidset_clear(&tip_oids);
695 for (ref = unmatched; ref; ref = next) {
696 next = ref->next;
697 free(ref);
700 *refs = newlist;
703 static void mark_alternate_complete(struct object *obj)
705 mark_complete(&obj->oid);
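/*
* Check whether the fetch can be satisfied locally.  Unless deepening,
* objects reachable from our refs and alternates are marked COMPLETE;
* complete remote tips seed the negotiator, and the return value is
* non-zero only if every ref left after filter_refs() is already here.
*/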
708 static int everything_local(struct fetch_pack_args *args,
709 struct ref **refs,
710 struct ref **sought, int nr_sought)
712 struct ref *ref;
713 int retval;
714 timestamp_t cutoff = 0;
716 save_commit_buffer = 0;
718 for (ref = *refs; ref; ref = ref->next) {
719 struct object *o;
721 if (!has_object_file(&ref->old_oid))
722 continue;
724 o = parse_object(&ref->old_oid);
725 if (!o)
726 continue;
728 /* We already have it -- which may mean that we were
729 * in sync with the other side at some time after
730 * that (it is OK if we guess wrong here).
731 */
732 if (o->type == OBJ_COMMIT) {
733 struct commit *commit = (struct commit *)o;
734 if (!cutoff || cutoff < commit->date)
735 cutoff = commit->date;
739 if (!args->no_dependents) {
740 if (!args->deepen) {
741 for_each_ref(mark_complete_oid, NULL);
742 for_each_cached_alternate(mark_alternate_complete);
743 commit_list_sort_by_date(&complete);
744 if (cutoff)
745 mark_recent_complete_commits(args, cutoff);
748 /*
749 * Mark all complete remote refs as common refs.
750 * Don't mark them common yet; the server has to be told so first.
751 */
752 for (ref = *refs; ref; ref = ref->next) {
753 struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
754 NULL, 0);
756 if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
757 continue;
759 if (!(o->flags & SEEN)) {
760 rev_list_push((struct commit *)o, COMMON_REF | SEEN);
762 mark_common((struct commit *)o, 1, 1);
767 filter_refs(args, refs, sought, nr_sought);
769 for (retval = 1, ref = *refs; ref ; ref = ref->next) {
770 const struct object_id *remote = &ref->old_oid;
771 struct object *o;
773 o = lookup_object(remote->hash);
774 if (!o || !(o->flags & COMPLETE)) {
775 retval = 0;
776 print_verbose(args, "want %s (%s)", oid_to_hex(remote),
777 ref->name);
778 continue;
780 print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
781 ref->name);
783 return retval;
786 static int sideband_demux(int in, int out, void *data)
788 int *xd = data;
789 int ret;
791 ret = recv_sideband("fetch-pack", xd[0], out);
792 close(out);
793 return ret;
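/*
* Receive the pack and hand it to a child process: small packs (fewer
* than unpack_limit objects) are exploded by unpack-objects, larger ones
* and anything from a promisor remote are kept and indexed by
* index-pack.  With side-band, a demuxer forwards band #2 (progress) to
* stderr while band #1 carries the pack data.
*/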
796 static int get_pack(struct fetch_pack_args *args,
797 int xd[2], char **pack_lockfile)
799 struct async demux;
800 int do_keep = args->keep_pack;
801 const char *cmd_name;
802 struct pack_header header;
803 int pass_header = 0;
804 struct child_process cmd = CHILD_PROCESS_INIT;
805 int ret;
807 memset(&demux, 0, sizeof(demux));
808 if (use_sideband) {
809 /* xd[] is talking with upload-pack; subprocess reads from
810 * xd[0], spits out band#2 to stderr, and feeds us band#1
811 * through demux->out.
812 */
813 demux.proc = sideband_demux;
814 demux.data = xd;
815 demux.out = -1;
816 demux.isolate_sigpipe = 1;
817 if (start_async(&demux))
818 die(_("fetch-pack: unable to fork off sideband demultiplexer"));
820 else
821 demux.out = xd[0];
823 if (!args->keep_pack && unpack_limit) {
825 if (read_pack_header(demux.out, &header))
826 die(_("protocol error: bad pack header"));
827 pass_header = 1;
828 if (ntohl(header.hdr_entries) < unpack_limit)
829 do_keep = 0;
830 else
831 do_keep = 1;
834 if (alternate_shallow_file) {
835 argv_array_push(&cmd.args, "--shallow-file");
836 argv_array_push(&cmd.args, alternate_shallow_file);
839 if (do_keep || args->from_promisor) {
840 if (pack_lockfile)
841 cmd.out = -1;
842 cmd_name = "index-pack";
843 argv_array_push(&cmd.args, cmd_name);
844 argv_array_push(&cmd.args, "--stdin");
845 if (!args->quiet && !args->no_progress)
846 argv_array_push(&cmd.args, "-v");
847 if (args->use_thin_pack)
848 argv_array_push(&cmd.args, "--fix-thin");
849 if (do_keep && (args->lock_pack || unpack_limit)) {
850 char hostname[HOST_NAME_MAX + 1];
851 if (xgethostname(hostname, sizeof(hostname)))
852 xsnprintf(hostname, sizeof(hostname), "localhost");
853 argv_array_pushf(&cmd.args,
854 "--keep=fetch-pack %"PRIuMAX " on %s",
855 (uintmax_t)getpid(), hostname);
857 if (args->check_self_contained_and_connected)
858 argv_array_push(&cmd.args, "--check-self-contained-and-connected");
859 if (args->from_promisor)
860 argv_array_push(&cmd.args, "--promisor");
862 else {
863 cmd_name = "unpack-objects";
864 argv_array_push(&cmd.args, cmd_name);
865 if (args->quiet || args->no_progress)
866 argv_array_push(&cmd.args, "-q");
867 args->check_self_contained_and_connected = 0;
870 if (pass_header)
871 argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
872 ntohl(header.hdr_version),
873 ntohl(header.hdr_entries));
874 if (fetch_fsck_objects >= 0
875 ? fetch_fsck_objects
876 : transfer_fsck_objects >= 0
877 ? transfer_fsck_objects
878 : 0)
879 argv_array_push(&cmd.args, "--strict");
881 cmd.in = demux.out;
882 cmd.git_cmd = 1;
883 if (start_command(&cmd))
884 die(_("fetch-pack: unable to fork off %s"), cmd_name);
885 if (do_keep && pack_lockfile) {
886 *pack_lockfile = index_pack_lockfile(cmd.out);
887 close(cmd.out);
890 if (!use_sideband)
891 /* Closed by start_command() */
892 xd[0] = -1;
894 ret = finish_command(&cmd);
895 if (!ret || (args->check_self_contained_and_connected && ret == 1))
896 args->self_contained_and_connected =
897 args->check_self_contained_and_connected &&
898 ret == 0;
899 else
900 die(_("%s failed"), cmd_name);
901 if (use_sideband && finish_async(&demux))
902 die(_("error in sideband demultiplexer"));
903 return 0;
906 static int cmp_ref_by_name(const void *a_, const void *b_)
908 const struct ref *a = *((const struct ref **)a_);
909 const struct ref *b = *((const struct ref **)b_);
910 return strcmp(a->name, b->name);
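/*
* Drive one fetch over an established connection: sort the wanted refs,
* check the capabilities the server advertised, run find_common() to
* agree on a common history, and download the result with get_pack(),
* handling shallow/deepen bookkeeping along the way.
*/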
913 static struct ref *do_fetch_pack(struct fetch_pack_args *args,
914 int fd[2],
915 const struct ref *orig_ref,
916 struct ref **sought, int nr_sought,
917 struct shallow_info *si,
918 char **pack_lockfile)
920 struct ref *ref = copy_ref_list(orig_ref);
921 struct object_id oid;
922 const char *agent_feature;
923 int agent_len;
925 sort_ref_list(&ref, ref_compare_name);
926 QSORT(sought, nr_sought, cmp_ref_by_name);
928 if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
929 die(_("Server does not support shallow clients"));
930 if (args->depth > 0 || args->deepen_since || args->deepen_not)
931 args->deepen = 1;
932 if (server_supports("multi_ack_detailed")) {
933 print_verbose(args, _("Server supports multi_ack_detailed"));
934 multi_ack = 2;
935 if (server_supports("no-done")) {
936 print_verbose(args, _("Server supports no-done"));
937 if (args->stateless_rpc)
938 no_done = 1;
941 else if (server_supports("multi_ack")) {
942 print_verbose(args, _("Server supports multi_ack"));
943 multi_ack = 1;
945 if (server_supports("side-band-64k")) {
946 print_verbose(args, _("Server supports side-band-64k"));
947 use_sideband = 2;
949 else if (server_supports("side-band")) {
950 print_verbose(args, _("Server supports side-band"));
951 use_sideband = 1;
953 if (server_supports("allow-tip-sha1-in-want")) {
954 print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
955 allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
957 if (server_supports("allow-reachable-sha1-in-want")) {
958 print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
959 allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
961 if (!server_supports("thin-pack"))
962 args->use_thin_pack = 0;
963 if (!server_supports("no-progress"))
964 args->no_progress = 0;
965 if (!server_supports("include-tag"))
966 args->include_tag = 0;
967 if (server_supports("ofs-delta"))
968 print_verbose(args, _("Server supports ofs-delta"));
969 else
970 prefer_ofs_delta = 0;
972 if ((agent_feature = server_feature_value("agent", &agent_len))) {
973 agent_supported = 1;
974 if (agent_len)
975 print_verbose(args, _("Server version is %.*s"),
976 agent_len, agent_feature);
978 if (server_supports("deepen-since"))
979 deepen_since_ok = 1;
980 else if (args->deepen_since)
981 die(_("Server does not support --shallow-since"));
982 if (server_supports("deepen-not"))
983 deepen_not_ok = 1;
984 else if (args->deepen_not)
985 die(_("Server does not support --shallow-exclude"));
986 if (!server_supports("deepen-relative") && args->deepen_relative)
987 die(_("Server does not support --deepen"));
989 if (everything_local(args, &ref, sought, nr_sought)) {
990 packet_flush(fd[1]);
991 goto all_done;
993 if (find_common(args, fd, &oid, ref) < 0)
994 if (!args->keep_pack)
995 /* When cloning, it is not unusual to have
996 * no common commit.
997 */
998 warning(_("no common commits"));
1000 if (args->stateless_rpc)
1001 packet_flush(fd[1]);
1002 if (args->deepen)
1003 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
1004 NULL);
1005 else if (si->nr_ours || si->nr_theirs)
1006 alternate_shallow_file = setup_temporary_shallow(si->shallow);
1007 else
1008 alternate_shallow_file = NULL;
1009 if (get_pack(args, fd, pack_lockfile))
1010 die(_("git fetch-pack: fetch failed."));
1012 all_done:
1013 return ref;
1016 static void fetch_pack_config(void)
1018 git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
1019 git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
1020 git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
1021 git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
1022 git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
1024 git_config(git_default_config, NULL);
1027 static void fetch_pack_setup(void)
1029 static int did_setup;
1030 if (did_setup)
1031 return;
1032 fetch_pack_config();
1033 if (0 <= transfer_unpack_limit)
1034 unpack_limit = transfer_unpack_limit;
1035 else if (0 <= fetch_unpack_limit)
1036 unpack_limit = fetch_unpack_limit;
1037 did_setup = 1;
1040 static int remove_duplicates_in_refs(struct ref **ref, int nr)
1042 struct string_list names = STRING_LIST_INIT_NODUP;
1043 int src, dst;
1045 for (src = dst = 0; src < nr; src++) {
1046 struct string_list_item *item;
1047 item = string_list_insert(&names, ref[src]->name);
1048 if (item->util)
1049 continue; /* already have it */
1050 item->util = ref[src];
1051 if (src != dst)
1052 ref[dst] = ref[src];
1053 dst++;
1055 for (src = dst; src < nr; src++)
1056 ref[src] = NULL;
1057 string_list_clear(&names, 0);
1058 return dst;
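/*
* Bring .git/shallow in line with what was fetched: remove it on
* --unshallow, commit the new shallow file when deepening, accept the
* server's shallow roots when cloning or with --update-shallow, and
* otherwise reject refs that would require new shallow entries.
*/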
1061 static void update_shallow(struct fetch_pack_args *args,
1062 struct ref **sought, int nr_sought,
1063 struct shallow_info *si)
1065 struct oid_array ref = OID_ARRAY_INIT;
1066 int *status;
1067 int i;
1069 if (args->deepen && alternate_shallow_file) {
1070 if (*alternate_shallow_file == '\0') { /* --unshallow */
1071 unlink_or_warn(git_path_shallow());
1072 rollback_lock_file(&shallow_lock);
1073 } else
1074 commit_lock_file(&shallow_lock);
1075 return;
1078 if (!si->shallow || !si->shallow->nr)
1079 return;
1081 if (args->cloning) {
1082 /*
1083 * remote is shallow, but this is a clone, there are
1084 * no objects in repo to worry about. Accept any
1085 * shallow points that exist in the pack (iow in repo
1086 * after get_pack() and reprepare_packed_git()).
1087 */
1088 struct oid_array extra = OID_ARRAY_INIT;
1089 struct object_id *oid = si->shallow->oid;
1090 for (i = 0; i < si->shallow->nr; i++)
1091 if (has_object_file(&oid[i]))
1092 oid_array_append(&extra, &oid[i]);
1093 if (extra.nr) {
1094 setup_alternate_shallow(&shallow_lock,
1095 &alternate_shallow_file,
1096 &extra);
1097 commit_lock_file(&shallow_lock);
1099 oid_array_clear(&extra);
1100 return;
1103 if (!si->nr_ours && !si->nr_theirs)
1104 return;
1106 remove_nonexistent_theirs_shallow(si);
1107 if (!si->nr_ours && !si->nr_theirs)
1108 return;
1109 for (i = 0; i < nr_sought; i++)
1110 oid_array_append(&ref, &sought[i]->old_oid);
1111 si->ref = &ref;
1113 if (args->update_shallow) {
1114 /*
1115 * remote is also shallow, .git/shallow may be updated
1116 * so all refs can be accepted. Make sure we only add
1117 * shallow roots that are actually reachable from new
1118 * refs.
1119 */
1120 struct oid_array extra = OID_ARRAY_INIT;
1121 struct object_id *oid = si->shallow->oid;
1122 assign_shallow_commits_to_refs(si, NULL, NULL);
1123 if (!si->nr_ours && !si->nr_theirs) {
1124 oid_array_clear(&ref);
1125 return;
1127 for (i = 0; i < si->nr_ours; i++)
1128 oid_array_append(&extra, &oid[si->ours[i]]);
1129 for (i = 0; i < si->nr_theirs; i++)
1130 oid_array_append(&extra, &oid[si->theirs[i]]);
1131 setup_alternate_shallow(&shallow_lock,
1132 &alternate_shallow_file,
1133 &extra);
1134 commit_lock_file(&shallow_lock);
1135 oid_array_clear(&extra);
1136 oid_array_clear(&ref);
1137 return;
1140 /*
1141 * remote is also shallow, check what ref is safe to update
1142 * without updating .git/shallow.
1143 */
1144 status = xcalloc(nr_sought, sizeof(*status));
1145 assign_shallow_commits_to_refs(si, NULL, status);
1146 if (si->nr_ours || si->nr_theirs) {
1147 for (i = 0; i < nr_sought; i++)
1148 if (status[i])
1149 sought[i]->status = REF_STATUS_REJECT_SHALLOW;
1151 free(status);
1152 oid_array_clear(&ref);
1155 struct ref *fetch_pack(struct fetch_pack_args *args,
1156 int fd[], struct child_process *conn,
1157 const struct ref *ref,
1158 const char *dest,
1159 struct ref **sought, int nr_sought,
1160 struct oid_array *shallow,
1161 char **pack_lockfile)
1163 struct ref *ref_cpy;
1164 struct shallow_info si;
1166 fetch_pack_setup();
1167 if (nr_sought)
1168 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1170 if (!ref) {
1171 packet_flush(fd[1]);
1172 die(_("no matching remote head"));
1174 prepare_shallow_info(&si, shallow);
1175 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1176 &si, pack_lockfile);
1177 reprepare_packed_git();
1178 update_shallow(args, sought, nr_sought, &si);
1179 clear_shallow_info(&si);
1180 return ref_cpy;
1183 int report_unmatched_refs(struct ref **sought, int nr_sought)
1185 int i, ret = 0;
1187 for (i = 0; i < nr_sought; i++) {
1188 if (!sought[i])
1189 continue;
1190 switch (sought[i]->match_status) {
1191 case REF_MATCHED:
1192 continue;
1193 case REF_NOT_MATCHED:
1194 error(_("no such remote ref %s"), sought[i]->name);
1195 break;
1196 case REF_UNADVERTISED_NOT_ALLOWED:
1197 error(_("Server does not allow request for unadvertised object %s"),
1198 sought[i]->name);
1199 break;
1201 ret = 1;
1203 return ret;