verify_pack: do not ignore return value of verification function
fetch-pack.c (blob 820251a8d80518508514b728b4b2afd8a171e296)
#include "cache.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)

static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1		01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;
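
/*
 * Push a commit onto the priority queue that feeds "have" generation,
 * mark it, and count it among the non-common revs unless it is already
 * known to be COMMON.
 */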
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const unsigned char *sha1)
{
	struct object *o = deref_tag(parse_object(sha1), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid->hash);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid->hash), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
  This function marks a rev and its ancestors as common.
  In some cases, it is desirable to mark only the ancestors (for example
  when only the server does not yet know that they are common).
*/

static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
  Get the next rev to send, ignoring the common.
*/

static const unsigned char *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return commit->object.sha1;
}
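
/*
 * Replies we may get back for our "have" lines.  A bare ACK ends the
 * negotiation; the "continue"/"common" variants (multi_ack and
 * multi_ack_detailed) tell us the object is common but more "have"s
 * are welcome; "ready" means the server could already produce a pack.
 */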
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->depth > 0) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die("git fetch-pack: expected shallow list");
		}
	}
}

static enum ack_type get_ack(int fd, unsigned char *result_sha1)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!len)
		die("git fetch-pack: expected ACK/NAK, got EOF");
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_sha1_hex(arg, result_sha1)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die("git fetch-pack: expected ACK/NAK, got '%s'", line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_ref(const struct ref *ref, void *unused)
{
	rev_list_insert_ref(NULL, ref->old_sha1);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 1024

static int next_flush(struct fetch_pack_args *args, int count)
{
	int flush_limit = args->stateless_rpc ? LARGE_FLUSH : PIPESAFE_FLUSH;

	if (count < flush_limit)
		count <<= 1;
	else
		count += flush_limit;
	return count;
}
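
/*
 * Negotiate with upload-pack: send our "want"s (capabilities ride on
 * the first one), then stream "have" lines in batches.  The batch size
 * starts at INITIAL_FLUSH and grows via next_flush(); on a plain
 * connection we stay one window ahead of the server's ACKs, while
 * stateless RPC replays the accumulated request state each round.
 * A negative return means no common commit was found.
 */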
static int find_common(struct fetch_pack_args *args,
		       int fd[2], unsigned char *result_sha1,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const unsigned char *sha1;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die("--stateless-rpc requires multi_ack_detailed");
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_alternate_ref(insert_one_alternate_ref, NULL);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		unsigned char *remote = refs->old_sha1;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = sha1_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->depth > 0) {
		char *line;
		const char *arg;
		unsigned char sha1[20];

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die("invalid shallow line: %s", line);
				register_shallow(sha1);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die("invalid unshallow line: %s", line);
				if (!lookup_object(sha1))
					die("object not found: %s", line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(sha1))
					die("error in object: %s", line);
				if (unregister_shallow(sha1))
					die("no shallow found: %s", line);
				continue;
			}
			die("expected shallow/unshallow, got %s", line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((sha1 = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", sha1_to_hex(sha1));
		if (args->verbose)
			fprintf(stderr, "have %s\n", sha1_to_hex(sha1));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_sha1);
				if (args->verbose && ack)
					fprintf(stderr, "got ack %d %s\n", ack,
							sha1_to_hex(result_sha1));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_sha1);
					if (!commit)
						die("invalid commit %s", sha1_to_hex(result_sha1));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = sha1_to_hex(result_sha1);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
					}
					mark_common(commit, 0, 1);
					retval = 0;
					in_vain = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				if (args->verbose)
					fprintf(stderr, "giving up\n");
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	if (args->verbose)
		fprintf(stderr, "done\n");
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_sha1);
		if (ack) {
			if (args->verbose)
				fprintf(stderr, "got ack (%d) %s\n", ack,
					sha1_to_hex(result_sha1));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
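
/*
 * COMPLETE bookkeeping: mark_complete() peels tags down to a commit,
 * sets the COMPLETE flag and collects the commit in "complete"; the
 * list is sorted by date so mark_recent_complete_commits() can walk it
 * from the newest entry down to the cutoff.
 */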
static struct commit_list *complete;

static int mark_complete(const unsigned char *sha1)
{
	struct object *o = parse_object(sha1);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(t->tagged->sha1);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid->hash);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 unsigned long cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		if (args->verbose)
			fprintf(stderr, "Marking %s as complete\n",
				sha1_to_hex(complete->item->object.sha1));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
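
/*
 * Trim the advertised ref list down to what the caller asked for: keep
 * refs that match "sought" (or everything except tags of a shallow
 * fetch when fetch_all is set), free the rest, and append copies of
 * raw sha1 requests when the server allows asking for unadvertised
 * objects.
 */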
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *ref, *next;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->matched = 1;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->depth || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			free(ref);
		}
	}

	/* Append unmatched requests to the list */
	if ((allow_unadvertised_object_request &
	    (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1))) {
		for (i = 0; i < nr_sought; i++) {
			unsigned char sha1[20];

			ref = sought[i];
			if (ref->matched)
				continue;
			if (get_sha1_hex(ref->name, sha1) ||
			    ref->name[40] != '\0' ||
			    hashcmp(sha1, ref->old_sha1))
				continue;

			ref->matched = 1;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		}
	}
	*refs = newlist;
}

static void mark_alternate_complete(const struct ref *ref, void *unused)
{
	mark_complete(ref->old_sha1);
}
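
/*
 * Decide whether anything actually needs to be fetched: mark local
 * completeness, push complete remote refs onto the negotiation queue
 * as COMMON_REF, run filter_refs(), and return 1 only if every
 * remaining wanted ref is already complete locally.
 */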
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	unsigned long cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_sha1_file(ref->old_sha1))
			continue;

		o = parse_object(ref->old_sha1);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->depth) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_alternate_ref(mark_alternate_complete, NULL);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_sha1),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const unsigned char *remote = ref->old_sha1;
		struct object *o;

		o = lookup_object(remote);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			if (!args->verbose)
				continue;
			fprintf(stderr,
				"want %s (%s)\n", sha1_to_hex(remote),
				ref->name);
			continue;
		}
		if (!args->verbose)
			continue;
		fprintf(stderr,
			"already have %s (%s)\n", sha1_to_hex(remote),
			ref->name);
	}
	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;

	int ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
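
/*
 * Receive the pack stream (optionally demultiplexed from the sideband)
 * and hand it to a child process: index-pack when the pack is being
 * kept, unpack-objects when the pack header says it is small enough to
 * explode according to unpack_limit.
 */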
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	const char *argv[22];
	char keep_arg[256];
	char hdr_arg[256];
	const char **av, *cmd_name;
	int do_keep = args->keep_pack;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		if (start_async(&demux))
			die("fetch-pack: unable to fork off sideband"
			    " demultiplexer");
	}
	else
		demux.out = xd[0];

	cmd.argv = argv;
	av = argv;
	*hdr_arg = 0;
	if (!args->keep_pack && unpack_limit) {
		struct pack_header header;

		if (read_pack_header(demux.out, &header))
			die("protocol error: bad pack header");
		snprintf(hdr_arg, sizeof(hdr_arg),
			 "--pack_header=%"PRIu32",%"PRIu32,
			 ntohl(header.hdr_version), ntohl(header.hdr_entries));
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		*av++ = "--shallow-file";
		*av++ = alternate_shallow_file;
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		*av++ = cmd_name = "index-pack";
		*av++ = "--stdin";
		if (!args->quiet && !args->no_progress)
			*av++ = "-v";
		if (args->use_thin_pack)
			*av++ = "--fix-thin";
		if (args->lock_pack || unpack_limit) {
			int s = sprintf(keep_arg,
					"--keep=fetch-pack %"PRIuMAX " on ", (uintmax_t) getpid());
			if (gethostname(keep_arg + s, sizeof(keep_arg) - s))
				strcpy(keep_arg + s, "localhost");
			*av++ = keep_arg;
		}
		if (args->check_self_contained_and_connected)
			*av++ = "--check-self-contained-and-connected";
	}
	else {
		*av++ = cmd_name = "unpack-objects";
		if (args->quiet || args->no_progress)
			*av++ = "-q";
		args->check_self_contained_and_connected = 0;
	}
	if (*hdr_arg)
		*av++ = hdr_arg;
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		*av++ = "--strict";
	*av++ = NULL;

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die("fetch-pack: unable to fork off %s", cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die("%s failed", cmd_name);
	if (use_sideband && finish_async(&demux))
		die("error in sideband demultiplexer");
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
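
/*
 * Core of the fetch: parse the capabilities advertised with the ref
 * list, bail out early if everything_local() says there is nothing to
 * fetch, otherwise run the find_common() negotiation, set up any
 * alternate shallow file, and download the pack with get_pack().
 */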
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	unsigned char sha1[20];
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	qsort(sought, nr_sought, sizeof(*sought), cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die("Server does not support shallow clients");
	if (server_supports("multi_ack_detailed")) {
		if (args->verbose)
			fprintf(stderr, "Server supports multi_ack_detailed\n");
		multi_ack = 2;
		if (server_supports("no-done")) {
			if (args->verbose)
				fprintf(stderr, "Server supports no-done\n");
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		if (args->verbose)
			fprintf(stderr, "Server supports multi_ack\n");
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		if (args->verbose)
			fprintf(stderr, "Server supports side-band-64k\n");
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		if (args->verbose)
			fprintf(stderr, "Server supports side-band\n");
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		if (args->verbose)
			fprintf(stderr, "Server supports allow-tip-sha1-in-want\n");
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		if (args->verbose)
			fprintf(stderr, "Server supports allow-reachable-sha1-in-want\n");
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta")) {
		if (args->verbose)
			fprintf(stderr, "Server supports ofs-delta\n");
	} else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (args->verbose && agent_len)
			fprintf(stderr, "Server version is %.*s\n",
				agent_len, agent_feature);
	}

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, sha1, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning("no common commits");

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->depth > 0)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die("git fetch-pack: fetch failed.");

 all_done:
	return ref;
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
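
/*
 * Reconcile .git/shallow with what we just fetched: handle --depth
 * (and --unshallow) directly, accept any shallow roots during a clone,
 * honor --update-shallow by adding only roots reachable from the new
 * refs, and otherwise reject refs that would require updating the
 * shallow file.
 */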
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct sha1_array ref = SHA1_ARRAY_INIT;
	int *status;
	int i;

	if (args->depth > 0 && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_sha1_file(sha1[i]))
				sha1_array_append(&extra, sha1[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		sha1_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		sha1_array_append(&ref, sought[i]->old_sha1);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			sha1_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			sha1_array_append(&extra, sha1[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			sha1_array_append(&extra, sha1[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		sha1_array_clear(&extra);
		sha1_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	sha1_array_clear(&ref);
}
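
/*
 * Public entry point: drop duplicate requests, negotiate and download
 * the pack via do_fetch_pack(), then re-scan the object store and
 * update the shallow state.
 */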
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct sha1_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die("no matching remote head");
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}