/* fetch-pack.c */
#include "cache.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)
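/*
 * Summary of how these flags are used during negotiation (added for
 * readability; not part of the upstream file):
 *  - COMPLETE:   the object is already reachable from a local ref and
 *                never needs to be fetched.
 *  - COMMON:     both sides are known to have this commit.
 *  - COMMON_REF: the commit is a remote ref tip that we already have;
 *                it seeds the "have" negotiation.
 *  - SEEN:       the commit has been pushed onto rev_list.
 *  - POPPED:     the commit has been taken off rev_list by get_rev().
 *  - ALTERNATE:  the object was advertised by an alternate object store.
 */
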
static int marked;
/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}

struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}

static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
  This function marks a rev and its ancestors as common.
  In some cases, it is desirable to mark only the ancestors (for example
  when only the server does not yet know that they are common).
*/
static void mark_common(struct commit *commit,
			int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
			     parents;
			     parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
  Get the next rev to send, ignoring the common.
*/
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}

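/*
 * Added note: these values mirror the "ACK <oid> ..." lines of the pack
 * protocol. A plain ACK ends negotiation, while the "continue", "common"
 * and "ready" variants come from multi_ack / multi_ack_detailed and keep
 * the exchange going.
 */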
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}

static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!len)
		die(_("git fetch-pack: expected ACK/NAK, got EOF"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
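/*
 * Added note on the constants above: next_flush() below grows the "have"
 * window starting from INITIAL_FLUSH, doubling it until it reaches
 * PIPESAFE_FLUSH (plain connections) or LARGE_FLUSH (stateless RPC),
 * after which it grows more slowly so a single round does not carry an
 * unbounded number of "have" lines.
 */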
static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

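/*
 * Overview of find_common(), added for orientation (the function itself
 * is unchanged): advertise capabilities and "want" lines for refs we do
 * not already have, send shallow/deepen requests, then stream "have"
 * lines from get_rev() in windows sized by next_flush(), reacting to
 * ACKs until the server says "ready" or MAX_IN_VAIN haves go
 * unacknowledged.
 */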
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

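/*
 * Added note: "complete" collects commits whose history we already have
 * in full, found by walking local ref tips (and alternate tips in
 * everything_local()); mark_recent_complete_commits() later trims it by
 * commit date.
 */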
static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}

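/*
 * Added summary: filter_refs() keeps the advertised refs that were asked
 * for in "sought" (or everything when fetch_all is set, minus tags during
 * a deepening fetch), and appends sought entries named by a raw object id
 * when the server allows unadvertised requests or the id is one of the
 * advertised tips.
 */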
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}

static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}

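/*
 * Added summary: everything_local() parses the advertised refs we already
 * have, marks local and alternate tips as COMPLETE (unless deepening),
 * seeds the negotiation with complete remote refs, filters the ref list,
 * and returns non-zero when every wanted ref is already present locally.
 */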
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	timestamp_t cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file_with_flags(&ref->old_oid,
						OBJECT_INFO_QUICK))
			continue;

		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}
	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

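/*
 * Added summary: get_pack() optionally forks the sideband demuxer, peeks
 * at the pack header to choose between index-pack (keep the pack) and
 * unpack-objects (explode it) based on unpack_limit, then feeds the
 * incoming stream to whichever command was chosen.
 */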
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (args->lock_pack || unpack_limit) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		argv_array_push(&cmd.args, "--strict");

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

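/*
 * Added summary: do_fetch_pack() sorts the advertised and sought refs,
 * reads the server's capability advertisement (multi_ack, side-band,
 * shallow/deepen support, and so on), then runs find_common() and
 * get_pack() unless everything_local() shows there is nothing to fetch.
 */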
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

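/*
 * Added summary: update_shallow() decides what happens to .git/shallow
 * after the pack arrives, covering the --unshallow case, clones of a
 * shallow remote, --update-shallow, and the default case where refs that
 * would need new shallow roots are rejected instead.
 */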
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}

int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}