/* git.git: fetch-pack.c */

#include "cache.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)

static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02

static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
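
/*
 * Objects pointed to by refs in alternate object stores, collected once
 * by for_each_cached_alternate() and reused on later walks.
 */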
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid->hash);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}

static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const unsigned char *sha1)
{
	struct object *o = deref_tag(parse_object(sha1), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid->hash);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid->hash), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
 * This function marks a rev and its ancestors as common.
 * In some cases, it is desirable to mark only the ancestors (for example
 * when only the server does not yet know that they are common).
 */
static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
 * Get the next rev to send, ignoring the common.
 */
static const unsigned char *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return commit->object.oid.hash;
}

enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}
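
/*
 * Read one ACK/NAK packet from the server and classify it; on any ACK
 * the named commit is written to result_sha1.
 */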
static enum ack_type get_ack(int fd, unsigned char *result_sha1)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!len)
		die(_("git fetch-pack: expected ACK/NAK, got EOF"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_sha1_hex(arg, result_sha1)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, obj->oid.hash);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
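
/*
 * Compute how many "have"s to send before the next flush: the window
 * doubles up to a limit, then grows additively over a bidirectional
 * pipe or by roughly 10% per round over stateless RPC.
 */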
static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}
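
/*
 * Negotiate the set of common commits with the other side: advertise
 * the wanted refs (plus our capabilities on the first "want"), then
 * walk our history sending "have" lines in growing batches and reading
 * the server's ACKs until a common base is found or we give up.
 * Returns 1 if there was nothing to fetch, -1 if no common commit was
 * found, and 0 otherwise (including a fetch into an empty repository).
 */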
static int find_common(struct fetch_pack_args *args,
		       int fd[2], unsigned char *result_sha1,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const unsigned char *sha1;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		unsigned char *remote = refs->old_oid.hash;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = sha1_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		unsigned long max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %lu", max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		unsigned char sha1[20];

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die(_("invalid shallow line: %s"), line);
				register_shallow(sha1);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(sha1))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(sha1))
					die(_("error in object: %s"), line);
				if (unregister_shallow(sha1))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((sha1 = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", sha1_to_hex(sha1));
		print_verbose(args, "have %s", sha1_to_hex(sha1));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_sha1);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, sha1_to_hex(result_sha1));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_sha1);
					if (!commit)
						die(_("invalid commit %s"), sha1_to_hex(result_sha1));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = sha1_to_hex(result_sha1);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_sha1);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, sha1_to_hex(result_sha1));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
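
/*
 * Commits reachable from our local refs; filled by mark_complete() and
 * consumed by mark_recent_complete_commits().
 */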
static struct commit_list *complete;

static int mark_complete(const unsigned char *sha1)
{
	struct object *o = parse_object(sha1);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(t->tagged->oid.hash);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid->hash);
}
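
/*
 * Pop commits off the "complete" list, newest first, and mark their
 * ancestry COMPLETE until we reach commits older than the cutoff.
 */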
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 unsigned long cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
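
/*
 * Trim the list of refs advertised by the server down to the refs we
 * were asked to fetch (or all of them with fetch_all, minus tags when
 * deepening), and append raw sha1 requests when the server allows
 * asking for unadvertised objects.
 */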
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *ref, *next;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->matched = 1;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			free(ref);
		}
	}

	/* Append unmatched requests to the list */
	if ((allow_unadvertised_object_request &
	    (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1))) {
		for (i = 0; i < nr_sought; i++) {
			unsigned char sha1[20];

			ref = sought[i];
			if (ref->matched)
				continue;
			if (get_sha1_hex(ref->name, sha1) ||
			    ref->name[40] != '\0' ||
			    hashcmp(sha1, ref->old_oid.hash))
				continue;

			ref->matched = 1;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		}
	}
	*refs = newlist;
}

static void mark_alternate_complete(struct object *obj)
{
	mark_complete(obj->oid.hash);
}
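
/*
 * Figure out which of the wanted refs we already have: mark complete
 * local commits, push already-complete remote tips onto the rev_list
 * as common refs, and return non-zero if nothing is left to fetch.
 */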
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	unsigned long cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file(&ref->old_oid))
			continue;

		o = parse_object(ref->old_oid.hash);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const unsigned char *remote = ref->old_oid.hash;
		struct object *o;

		o = lookup_object(remote);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", sha1_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), sha1_to_hex(remote),
			      ref->name);
	}
	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
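
/*
 * Receive the pack stream (demultiplexing the sideband if it is in
 * use) and feed it to index-pack or unpack-objects, depending on
 * whether the pack should be kept.
 */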
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (args->lock_pack || unpack_limit) {
			char hostname[256];
			if (gethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		argv_array_push(&cmd.args, "--strict");

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
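
/*
 * Drive one fetch over an established connection: parse the server's
 * capabilities, bail out early if everything is already local,
 * negotiate common commits, and finally download and index the pack.
 */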
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	unsigned char sha1[20];
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, sha1, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
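
/* Read the configuration variables that affect fetch-pack. */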
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}
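
/*
 * Compact the sought array so that each ref name appears only once;
 * returns the number of entries that remain.
 */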
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
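
/*
 * After the pack has been fetched, decide what to do with .git/shallow:
 * commit or roll back the lock taken for --depth/--unshallow, accept
 * the remote's shallow roots when cloning or when --update-shallow was
 * given, and otherwise reject refs that would need new shallow roots.
 */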
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct sha1_array ref = SHA1_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_sha1_file(sha1[i]))
				sha1_array_append(&extra, sha1[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		sha1_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		sha1_array_append(&ref, sought[i]->old_oid.hash);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			sha1_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			sha1_array_append(&extra, sha1[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			sha1_array_append(&extra, sha1[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		sha1_array_clear(&extra);
		sha1_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	sha1_array_clear(&ref);
}
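
/*
 * Entry point used by the transport code: deduplicate the requested
 * refs, run the fetch proper, and then reconcile shallow information.
 */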
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct sha1_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}