[git/mingw/j6t.git] / fetch-pack.c (blob 2dabee97b28915ccf5c06b58dd0193bf03a01db8)
#include "cache.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)

static int marked;
/*
 * After sending this many "have"s if we do not get any new ACK , we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;
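
/*
 * Queue a commit for the "have" negotiation below, tracking how many
 * queued revs are not yet known to be common with the remote.
 */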
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}
static int rev_list_insert_ref(const char *refname, const unsigned char *sha1)
{
	struct object *o = deref_tag(parse_object(sha1), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid->hash);
}
static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid->hash), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}
/*
   This function marks a rev and its ancestors as common.
   In some cases, it is desirable to mark only the ancestors (for example
   when only the server does not yet know that they are common).
*/

static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
/*
  Get the next rev to send, ignoring the common.
*/

static const unsigned char *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return commit->object.sha1;
}
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->depth > 0) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die("git fetch-pack: expected shallow list");
		}
	}
}
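
/*
 * Read one pkt-line from the server and classify it as NAK or one of the
 * ACK variants; on an ACK, the acknowledged sha1 is stored in result_sha1.
 */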
static enum ack_type get_ack(int fd, unsigned char *result_sha1)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!len)
		die("git fetch-pack: expected ACK/NAK, got EOF");
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_sha1_hex(arg, result_sha1)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die("git fetch_pack: expected ACK/NAK, got '%s'", line);
}
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_ref(const struct ref *ref, void *unused)
{
	rev_list_insert_ref(NULL, ref->old_sha1);
}
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 1024

static int next_flush(struct fetch_pack_args *args, int count)
{
	int flush_limit = args->stateless_rpc ? LARGE_FLUSH : PIPESAFE_FLUSH;

	if (count < flush_limit)
		count <<= 1;
	else
		count += flush_limit;
	return count;
}
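
/*
 * Negotiate the set of common commits: send "want" lines for the refs
 * being fetched, then stream "have" lines from our history in growing
 * batches until the server ACKs enough common commits (or we give up),
 * so the pack it sends can omit objects both sides already have.
 */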
static int find_common(struct fetch_pack_args *args,
		       int fd[2], unsigned char *result_sha1,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const unsigned char *sha1;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die("--stateless-rpc requires multi_ack_detailed");
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_alternate_ref(insert_one_alternate_ref, NULL);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		unsigned char *remote = refs->old_sha1;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = sha1_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->depth > 0) {
		char *line;
		const char *arg;
		unsigned char sha1[20];

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die("invalid shallow line: %s", line);
				register_shallow(sha1);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die("invalid unshallow line: %s", line);
				if (!lookup_object(sha1))
					die("object not found: %s", line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(sha1))
					die("error in object: %s", line);
				if (unregister_shallow(sha1))
					die("no shallow found: %s", line);
				continue;
			}
			die("expected shallow/unshallow, got %s", line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((sha1 = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", sha1_to_hex(sha1));
		if (args->verbose)
			fprintf(stderr, "have %s\n", sha1_to_hex(sha1));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_sha1);
				if (args->verbose && ack)
					fprintf(stderr, "got ack %d %s\n", ack,
							sha1_to_hex(result_sha1));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_sha1);
					if (!commit)
						die("invalid commit %s", sha1_to_hex(result_sha1));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = sha1_to_hex(result_sha1);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
					}
					mark_common(commit, 0, 1);
					retval = 0;
					in_vain = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				if (args->verbose)
					fprintf(stderr, "giving up\n");
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	if (args->verbose)
		fprintf(stderr, "done\n");
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_sha1);
		if (ack) {
			if (args->verbose)
				fprintf(stderr, "got ack (%d) %s\n", ack,
					sha1_to_hex(result_sha1));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
static struct commit_list *complete;

static int mark_complete(const unsigned char *sha1)
{
	struct object *o = parse_object(sha1);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(t->tagged->sha1);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid->hash);
}
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 unsigned long cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		if (args->verbose)
			fprintf(stderr, "Marking %s as complete\n",
				sha1_to_hex(complete->item->object.sha1));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
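
/*
 * Trim the advertised ref list down to the refs we were asked for (or all
 * of them when fetch_all is set), and append requests for unadvertised
 * objects when the server allows fetching them by sha1.
 */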
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *ref, *next;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->matched = 1;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->depth || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			free(ref);
		}
	}

	/* Append unmatched requests to the list */
	if ((allow_unadvertised_object_request &
	    (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1))) {
		for (i = 0; i < nr_sought; i++) {
			unsigned char sha1[20];

			ref = sought[i];
			if (ref->matched)
				continue;
			if (get_sha1_hex(ref->name, sha1) ||
			    ref->name[40] != '\0' ||
			    hashcmp(sha1, ref->old_sha1))
				continue;

			ref->matched = 1;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		}
	}
	*refs = newlist;
}
static void mark_alternate_complete(const struct ref *ref, void *unused)
{
	mark_complete(ref->old_sha1);
}
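
/*
 * Check whether all wanted refs already exist locally; as a side effect,
 * mark complete local commits so the negotiation can advertise them as
 * common. Returns non-zero when there is nothing left to fetch.
 */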
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	unsigned long cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_sha1_file(ref->old_sha1))
			continue;

		o = parse_object(ref->old_sha1);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->depth) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_alternate_ref(mark_alternate_complete, NULL);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_sha1),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const unsigned char *remote = ref->old_sha1;
		struct object *o;

		o = lookup_object(remote);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			if (!args->verbose)
				continue;
			fprintf(stderr,
				"want %s (%s)\n", sha1_to_hex(remote),
				ref->name);
			continue;
		}
		if (!args->verbose)
			continue;
		fprintf(stderr,
			"already have %s (%s)\n", sha1_to_hex(remote),
			ref->name);
	}
	return retval;
}
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;

	int ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
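
/*
 * Receive the pack stream from the server, demultiplexing sideband data
 * when it is in use, and feed it to either index-pack or unpack-objects
 * depending on the keep-pack settings and the unpack limit.
 */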
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		if (start_async(&demux))
			die("fetch-pack: unable to fork off sideband"
			    " demultiplexer");
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die("protocol error: bad pack header");
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (args->lock_pack || unpack_limit) {
			char hostname[256];
			if (gethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		argv_array_push(&cmd.args, "--strict");

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die("fetch-pack: unable to fork off %s", cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die("%s failed", cmd_name);
	if (use_sideband && finish_async(&demux))
		die("error in sideband demultiplexer");
	return 0;
}
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
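
/*
 * Core of the fetch: parse the capabilities advertised by the server,
 * run the common-commit negotiation unless everything is already local,
 * and download the resulting pack.
 */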
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	unsigned char sha1[20];
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	qsort(sought, nr_sought, sizeof(*sought), cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die("Server does not support shallow clients");
	if (server_supports("multi_ack_detailed")) {
		if (args->verbose)
			fprintf(stderr, "Server supports multi_ack_detailed\n");
		multi_ack = 2;
		if (server_supports("no-done")) {
			if (args->verbose)
				fprintf(stderr, "Server supports no-done\n");
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		if (args->verbose)
			fprintf(stderr, "Server supports multi_ack\n");
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		if (args->verbose)
			fprintf(stderr, "Server supports side-band-64k\n");
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		if (args->verbose)
			fprintf(stderr, "Server supports side-band\n");
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		if (args->verbose)
			fprintf(stderr, "Server supports allow-tip-sha1-in-want\n");
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		if (args->verbose)
			fprintf(stderr, "Server supports allow-reachable-sha1-in-want\n");
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta")) {
		if (args->verbose)
			fprintf(stderr, "Server supports ofs-delta\n");
	} else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (args->verbose && agent_len)
			fprintf(stderr, "Server version is %.*s\n",
				agent_len, agent_feature);
	}

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, sha1, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning("no common commits");

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->depth > 0)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die("git fetch-pack: fetch failed.");

 all_done:
	return ref;
}
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
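
/*
 * After the pack has been received, decide how .git/shallow (or the
 * alternate shallow file) should be updated, depending on --depth,
 * --unshallow, whether this is a clone, and args->update_shallow;
 * otherwise, refs that would require new shallow roots are rejected.
 */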
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct sha1_array ref = SHA1_ARRAY_INIT;
	int *status;
	int i;

	if (args->depth > 0 && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_sha1_file(sha1[i]))
				sha1_array_append(&extra, sha1[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		sha1_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		sha1_array_append(&ref, sought[i]->old_sha1);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			sha1_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			sha1_array_append(&extra, sha1[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			sha1_array_append(&extra, sha1[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		sha1_array_clear(&extra);
		sha1_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	sha1_array_clear(&ref);
}
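
/*
 * Public entry point: set up configuration, de-duplicate the sought refs,
 * run the fetch, and then reconcile shallow information with the result.
 */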
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct sha1_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die("no matching remote head");
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}